Dataset columns: text (string, 96–319k characters), id (string, 14–178 characters), metadata (dict)
'use client'

import { useState, useEffect, useRef, useCallback } from 'react'

export default function Home() {

  // Keep track of the classification result and the model loading status.
  const [result, setResult] = useState(null);
  const [ready, setReady] = useState(null);

  // Create a reference to the worker object.
  const worker = useRef(null);

  // We use the `useEffect` hook to set up the worker as soon as the `App` component is mounted.
  useEffect(() => {
    if (!worker.current) {
      // Create the worker if it does not yet exist.
      worker.current = new Worker(new URL('./worker.js', import.meta.url), {
        type: 'module'
      });
    }

    // Create a callback function for messages from the worker thread.
    const onMessageReceived = (e) => {
      switch (e.data.status) {
        case 'initiate':
          setReady(false);
          break;
        case 'ready':
          setReady(true);
          break;
        case 'complete':
          setResult(e.data.output[0])
          break;
      }
    };

    // Attach the callback function as an event listener.
    worker.current.addEventListener('message', onMessageReceived);

    // Define a cleanup function for when the component is unmounted.
    return () => worker.current.removeEventListener('message', onMessageReceived);
  });

  const classify = useCallback((text) => {
    if (worker.current) {
      worker.current.postMessage({ text });
    }
  }, []);

  return (
    <main className="flex min-h-screen flex-col items-center justify-center p-12">
      <h1 className="text-5xl font-bold mb-2 text-center">Transformers.js</h1>
      <h2 className="text-2xl mb-4 text-center">Next.js template (client-side)</h2>
      <input
        type="text"
        className="w-full max-w-xs p-2 border border-gray-300 rounded mb-4"
        placeholder="Enter text here"
        onInput={e => {
          classify(e.target.value);
        }}
      />

      {ready !== null && (
        <pre className="bg-gray-100 p-2 rounded">
          {(!ready || !result) ? 'Loading...' : JSON.stringify(result, null, 2)}
        </pre>
      )}
    </main>
  )
}
transformers.js/examples/next-client/src/app/page.js/0
{ "file_path": "transformers.js/examples/next-client/src/app/page.js", "repo_id": "transformers.js", "token_count": 858 }
import { useEffect, useRef, useState } from 'react'

import LanguageSelector from './components/LanguageSelector';
import Progress from './components/Progress';

import './App.css'

function App() {

  // Model loading
  const [ready, setReady] = useState(null);
  const [disabled, setDisabled] = useState(false);
  const [progressItems, setProgressItems] = useState([]);

  // Inputs and outputs
  const [input, setInput] = useState('I love walking my dog.');
  const [sourceLanguage, setSourceLanguage] = useState('eng_Latn');
  const [targetLanguage, setTargetLanguage] = useState('fra_Latn');
  const [output, setOutput] = useState('');

  // Create a reference to the worker object.
  const worker = useRef(null);

  // We use the `useEffect` hook to set up the worker as soon as the `App` component is mounted.
  useEffect(() => {
    if (!worker.current) {
      // Create the worker if it does not yet exist.
      worker.current = new Worker(new URL('./worker.js', import.meta.url), {
        type: 'module'
      });
    }

    // Create a callback function for messages from the worker thread.
    const onMessageReceived = (e) => {
      switch (e.data.status) {
        case 'initiate':
          // Model file has started loading: add a new progress item to the list.
          setReady(false);
          setProgressItems(prev => [...prev, e.data]);
          break;

        case 'progress':
          // Model file progress: update one of the progress items.
          setProgressItems(
            prev => prev.map(item => {
              if (item.file === e.data.file) {
                return { ...item, progress: e.data.progress }
              }
              return item;
            })
          );
          break;

        case 'done':
          // Model file loaded: remove the progress item from the list.
          setProgressItems(
            prev => prev.filter(item => item.file !== e.data.file)
          );
          break;

        case 'ready':
          // Pipeline ready: the worker is ready to accept messages.
          setReady(true);
          break;

        case 'update':
          // Generation update: update the output text.
          setOutput(e.data.output);
          break;

        case 'complete':
          // Generation complete: re-enable the "Translate" button.
          setDisabled(false);
          break;
      }
    };

    // Attach the callback function as an event listener.
    worker.current.addEventListener('message', onMessageReceived);

    // Define a cleanup function for when the component is unmounted.
    return () => worker.current.removeEventListener('message', onMessageReceived);
  });

  const translate = () => {
    setDisabled(true);
    worker.current.postMessage({
      text: input,
      src_lang: sourceLanguage,
      tgt_lang: targetLanguage,
    });
  }

  return (
    <>
      <h1>Transformers.js</h1>
      <h2>ML-powered multilingual translation in React!</h2>

      <div className='container'>
        <div className='language-container'>
          <LanguageSelector type={"Source"} defaultLanguage={"eng_Latn"} onChange={x => setSourceLanguage(x.target.value)} />
          <LanguageSelector type={"Target"} defaultLanguage={"fra_Latn"} onChange={x => setTargetLanguage(x.target.value)} />
        </div>

        <div className='textbox-container'>
          <textarea value={input} rows={3} onChange={e => setInput(e.target.value)}></textarea>
          <textarea value={output} rows={3} readOnly></textarea>
        </div>
      </div>

      <button disabled={disabled} onClick={translate}>Translate</button>

      <div className='progress-bars-container'>
        {ready === false && (
          <label>Loading models... (only runs once)</label>
        )}
        {progressItems.map(data => (
          <div key={data.file}>
            <Progress text={data.file} percentage={data.progress} />
          </div>
        ))}
      </div>
    </>
  )
}

export default App
transformers.js/examples/react-translator/src/App.jsx/0
{ "file_path": "transformers.js/examples/react-translator/src/App.jsx", "repo_id": "transformers.js", "token_count": 1595 }
<!DOCTYPE html>
<html lang="en">

<head>
  <meta charset="UTF-8" />
  <link rel="stylesheet" href="index.css" />
  <meta name="viewport" content="width=device-width, initial-scale=1.0" />
  <title>Transformers.js - Segment Anything WebGPU</title>
</head>

<body>
  <h1>Segment Anything WebGPU</h1>
  <h3>In-browser image segmentation w/ <a href="https://hf.co/docs/transformers.js" target="_blank">🤗 Transformers.js</a></h3>
  <div id="container">
    <label id="upload-button" for="upload">
      <svg width="25" height="25" viewBox="0 0 25 25" fill="none" xmlns="http://www.w3.org/2000/svg">
        <path fill="#000"
          d="M3.5 24.3a3 3 0 0 1-1.9-.8c-.5-.5-.8-1.2-.8-1.9V2.9c0-.7.3-1.3.8-1.9.6-.5 1.2-.7 2-.7h18.6c.7 0 1.3.2 1.9.7.5.6.7 1.2.7 2v18.6c0 .7-.2 1.4-.7 1.9a3 3 0 0 1-2 .8H3.6Zm0-2.7h18.7V2.9H3.5v18.7Zm2.7-2.7h13.3c.3 0 .5 0 .6-.3v-.7l-3.7-5a.6.6 0 0 0-.6-.2c-.2 0-.4 0-.5.3l-3.5 4.6-2.4-3.3a.6.6 0 0 0-.6-.3c-.2 0-.4.1-.5.3l-2.7 3.6c-.1.2-.2.4 0 .7.1.2.3.3.6.3Z">
        </path>
      </svg>
      Click to upload image
      <label id="example">(or try example)</label>
    </label>
    <canvas id="mask-output"></canvas>
  </div>
  <label id="status"></label>
  <div id="controls">
    <button id="reset-image">Reset image</button>
    <button id="clear-points">Clear points</button>
    <button id="cut-mask" disabled>Cut mask</button>
  </div>
  <p id="information">
    Left click = positive points, right click = negative points.
  </p>
  <input id="upload" type="file" accept="image/*" />
  <script src="index.js" type="module"></script>
</body>

</html>
transformers.js/examples/segment-anything-client/index.html/0
{ "file_path": "transformers.js/examples/segment-anything-client/index.html", "repo_id": "transformers.js", "token_count": 893 }
import { downloadImage } from '../utils.js'
import Image from 'next/image'

export function Modal({ currentImage, setCurrentImage }) {

  return (
    <div
      className='fixed inset-0 z-30 backdrop-blur-2xl w-full h-full bg-black top-0 left-0 transition'
      style={{
        backgroundColor: `rgba(0, 0, 0, ${currentImage ? 0.8 : 0})`,
        opacity: currentImage ? 1 : 0,
        pointerEvents: currentImage ? 'auto' : 'none',
      }}
    >
      {currentImage && <>
        <Image
          alt={currentImage.photo_description || currentImage.ai_description || ""}
          className="transform rounded-lg transition will-change-auto"
          style={{
            transform: 'translate3d(0, 0, 0)',
          }}
          layout={'fill'}
          objectFit={'contain'}
          src={currentImage.photo_image_url}
          unoptimized={true}
        />

        <div className='absolute top-0 left-0 flex items-center gap-2 p-3 text-white'>
          <button
            onClick={() => setCurrentImage(null)}
            className="rounded-full bg-black/50 p-2 text-white/75 backdrop-blur-lg transition hover:bg-black/75 hover:text-white">
            <svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" strokeWidth="1.5" stroke="currentColor" aria-hidden="true" className="h-5 w-5">
              <path strokeLinecap="round" strokeLinejoin="round" d="M6 18L18 6M6 6l12 12"></path>
            </svg>
          </button>
        </div>

        <div className="absolute top-0 right-0 flex items-center gap-2 p-3 text-white">
          <a
            href={currentImage.photo_url}
            className="rounded-full bg-black/50 p-2 text-white/75 backdrop-blur-lg transition hover:bg-black/75 hover:text-white"
            target="_blank"
            title="View on Unsplash"
            rel="noreferrer">
            <svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" strokeWidth="1.5" stroke="currentColor" aria-hidden="true" className="h-5 w-5">
              <path strokeLinecap="round" strokeLinejoin="round" d="M13.5 6H5.25A2.25 2.25 0 003 8.25v10.5A2.25 2.25 0 005.25 21h10.5A2.25 2.25 0 0018 18.75V10.5m-10.5 6L21 3m0 0h-5.25M21 3v5.25"></path>
            </svg>
          </a>
          <button
            onClick={() => downloadImage(currentImage.photo_image_url, `${currentImage.photo_id}.png`)}
            className="rounded-full bg-black/50 p-2 text-white/75 backdrop-blur-lg transition hover:bg-black/75 hover:text-white"
            title="Download">
            <svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" strokeWidth="1.5" stroke="currentColor" aria-hidden="true" className="h-5 w-5">
              <path strokeLinecap="round" strokeLinejoin="round" d="M3 16.5v2.25A2.25 2.25 0 005.25 21h13.5A2.25 2.25 0 0021 18.75V16.5M16.5 12L12 16.5m0 0L7.5 12m4.5 4.5V3"></path>
            </svg>
          </button>
        </div>
      </>}
    </div>)
}
transformers.js/examples/semantic-image-search/src/app/components/Modal.jsx/0
{ "file_path": "transformers.js/examples/semantic-image-search/src/app/components/Modal.jsx", "repo_id": "transformers.js", "token_count": 1950 }
import { useEffect, useRef } from "react"; export default function AudioPlayer({ audioUrl, mimeType }) { const audioPlayer = useRef(null); const audioSource = useRef(null); // Updates src when url changes useEffect(() => { if (audioPlayer.current && audioSource.current) { audioSource.current.src = audioUrl; audioPlayer.current.load(); } }, [audioUrl]); return ( <div className='flex relative z-10 my-4 w-full'> <audio ref={audioPlayer} controls className='w-full h-14 rounded-lg bg-white shadow-xl shadow-black/5 ring-1 ring-slate-700/10' > <source ref={audioSource} type={mimeType}></source> </audio> </div> ); }
transformers.js/examples/text-to-speech-client/src/components/AudioPlayer.jsx/0
{ "file_path": "transformers.js/examples/text-to-speech-client/src/components/AudioPlayer.jsx", "repo_id": "transformers.js", "token_count": 369 }
#root {
  max-width: 1280px;
  width: 100%;
  margin: 0 auto;
  padding: 2rem;
  text-align: center;

  display: flex;
  justify-content: center;
  align-items: center;
  flex-direction: column;
}
transformers.js/examples/tokenizer-playground/src/App.css/0
{ "file_path": "transformers.js/examples/tokenizer-playground/src/App.css", "repo_id": "transformers.js", "token_count": 73 }
export default function UserIcon(props) {
  return (
    <svg
      {...props}
      xmlns="http://www.w3.org/2000/svg"
      width="24"
      height="24"
      viewBox="0 0 24 24"
      fill="none"
      stroke="currentColor"
      strokeWidth="2"
      strokeLinecap="round"
      strokeLinejoin="round"
    >
      <path d="M19 21v-2a4 4 0 0 0-4-4H9a4 4 0 0 0-4 4v2" />
      <circle cx="12" cy="7" r="4" />
    </svg>
  )
}
transformers.js/examples/webgpu-vlm/src/components/icons/UserIcon.jsx/0
{ "file_path": "transformers.js/examples/webgpu-vlm/src/components/icons/UserIcon.jsx", "repo_id": "transformers.js", "token_count": 313 }
def generate_tokenizer_json(tokenizer):
    vocab = tokenizer.vocab

    special_tokens_vocab = vocab
    if "<pad>" not in tokenizer.vocab:
        # For MMS tokenizers, the vocab is of the form:
        # {
        #   language_id: { language_vocab }
        # }
        # So, to get the list of special tokens, we just get the english vocab
        special_tokens_vocab = vocab['eng']

    tokenizer_json = {
        "version": "1.0",
        "truncation": None,
        "padding": None,
        "added_tokens": [
            {
                "id": v,
                "content": k,
                "single_word": False,
                "lstrip": True,
                "rstrip": True,
                "normalized": False,
                "special": True
            }
            for k, v in special_tokens_vocab.items()
            if k.startswith('<') and k.endswith('>')
        ],
        "normalizer": {
            "type": "Replace",
            "pattern": {
                "String": " "
            },
            "content": "|"
        },
        "pre_tokenizer": {
            "type": "Split",
            "pattern": {
                "Regex": ""
            },
            "behavior": "Isolated",
            "invert": False
        },
        "post_processor": None,
        "decoder": {
            "type": "CTC",
            "pad_token": "<pad>",
            "word_delimiter_token": "|",
            "cleanup": True
        },
        "model": {
            "vocab": vocab
        }
    }

    return tokenizer_json
transformers.js/scripts/extra/wav2vec2.py/0
{ "file_path": "transformers.js/scripts/extra/wav2vec2.py", "repo_id": "transformers.js", "token_count": 890 }
/** * @file Definitions of all models available in Transformers.js. * * **Example:** Load and run an `AutoModel`. * * ```javascript * import { AutoModel, AutoTokenizer } from '@huggingface/transformers'; * * let tokenizer = await AutoTokenizer.from_pretrained('Xenova/bert-base-uncased'); * let model = await AutoModel.from_pretrained('Xenova/bert-base-uncased'); * * let inputs = await tokenizer('I love transformers!'); * let { logits } = await model(inputs); * // Tensor { * // data: Float32Array(183132) [-7.117443084716797, -7.107812881469727, -7.092104911804199, ...] * // dims: (3) [1, 6, 30522], * // type: "float32", * // size: 183132, * // } * ``` * * We also provide other `AutoModel`s (listed below), which you can use in the same way as the Python library. For example: * * **Example:** Load and run an `AutoModelForSeq2SeqLM`. * ```javascript * import { AutoModelForSeq2SeqLM, AutoTokenizer } from '@huggingface/transformers'; * * let tokenizer = await AutoTokenizer.from_pretrained('Xenova/t5-small'); * let model = await AutoModelForSeq2SeqLM.from_pretrained('Xenova/t5-small'); * * let { input_ids } = await tokenizer('translate English to German: I love transformers!'); * let outputs = await model.generate(input_ids); * let decoded = tokenizer.decode(outputs[0], { skip_special_tokens: true }); * // 'Ich liebe Transformatoren!' * ``` * * @module models */ import { AutoConfig, getKeyValueShapes, } from './configs.js'; import { deviceToExecutionProviders, createInferenceSession, isONNXTensor, isONNXProxy, } from './backends/onnx.js'; import { DATA_TYPES, DEFAULT_DEVICE_DTYPE_MAPPING, DEFAULT_DTYPE_SUFFIX_MAPPING, isWebGpuFp16Supported, } from './utils/dtypes.js'; import { Callable, } from './utils/generic.js'; import { mergeArrays, pick, } from './utils/core.js'; import { getModelFile, getModelJSON, } from './utils/hub.js'; import { GITHUB_ISSUE_URL, } from './utils/constants.js'; import { LogitsProcessorList, ForcedBOSTokenLogitsProcessor, ForcedEOSTokenLogitsProcessor, SuppressTokensAtBeginLogitsProcessor, WhisperTimeStampLogitsProcessor, NoRepeatNGramLogitsProcessor, RepetitionPenaltyLogitsProcessor, NoBadWordsLogitsProcessor, MinLengthLogitsProcessor, MinNewTokensLengthLogitsProcessor, TemperatureLogitsWarper, TopKLogitsWarper, TopPLogitsWarper, ClassifierFreeGuidanceLogitsProcessor, } from './generation/logits_process.js'; import { GenerationConfig, } from './generation/configuration_utils.js'; import { cat, mean, zeros, zeros_like, ones, ones_like, full, full_like, stack, std_mean, Tensor, } from './utils/tensor.js'; import { RawImage } from './utils/image.js'; import { dynamic_time_warping, max, medianFilter } from './utils/maths.js'; import { EosTokenCriteria, MaxLengthCriteria, StoppingCriteriaList } from './generation/stopping_criteria.js'; import { LogitsSampler } from './generation/logits_sampler.js'; import { apis } from './env.js'; import { WhisperGenerationConfig } from './models/whisper/generation_whisper.js'; import { whisper_language_to_code } from './models/whisper/common_whisper.js'; ////////////////////////////////////////////////// // Model types: used internally const MODEL_TYPES = { EncoderOnly: 0, EncoderDecoder: 1, Seq2Seq: 2, Vision2Seq: 3, DecoderOnly: 4, MaskGeneration: 5, ImageTextToText: 6, Musicgen: 7, MultiModality: 8, Phi3V: 9, } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // Helper functions // NOTE: These will be populated fully later const MODEL_TYPE_MAPPING = new Map(); const 
MODEL_NAME_TO_CLASS_MAPPING = new Map(); const MODEL_CLASS_TO_NAME_MAPPING = new Map(); /** * Constructs an InferenceSession using a model file located at the specified path. * @param {string} pretrained_model_name_or_path The path to the directory containing the model file. * @param {string} fileName The name of the model file. * @param {import('./utils/hub.js').PretrainedModelOptions} options Additional options for loading the model. * @returns {Promise<{buffer: Uint8Array, session_options: Object, session_config: Object}>} A Promise that resolves to the data needed to create an InferenceSession object. * @private */ async function getSession(pretrained_model_name_or_path, fileName, options) { const custom_config = options.config?.['transformers.js_config'] ?? {}; let device = options.device ?? custom_config.device; if (device && typeof device !== 'string') { if (device.hasOwnProperty(fileName)) { device = device[fileName]; } else { console.warn(`device not specified for "${fileName}". Using the default device.`); device = null; } } // If the device is not specified, we use the default (supported) execution providers. const selectedDevice = /** @type {import("./utils/devices.js").DeviceType} */( device ?? (apis.IS_NODE_ENV ? 'cpu' : 'wasm') ); const executionProviders = deviceToExecutionProviders(selectedDevice); // If options.dtype is specified, we use it to choose the suffix for the model file. // Otherwise, we use the default dtype for the device. let dtype = options.dtype ?? custom_config.dtype; if (typeof dtype !== 'string') { if (dtype && dtype.hasOwnProperty(fileName)) { dtype = dtype[fileName]; } else { dtype = DEFAULT_DEVICE_DTYPE_MAPPING[selectedDevice] ?? DATA_TYPES.fp32; console.warn(`dtype not specified for "${fileName}". Using the default dtype (${dtype}) for this device (${selectedDevice}).`); } } if (dtype === DATA_TYPES.auto) { // Try to choose the auto dtype based on the custom config let config_dtype = custom_config.dtype; if (typeof config_dtype !== 'string') { config_dtype = config_dtype[fileName]; } if (config_dtype && config_dtype !== DATA_TYPES.auto && DATA_TYPES.hasOwnProperty(config_dtype)) { // Defined by the custom config, and is not "auto" dtype = config_dtype; } else { // Choose default dtype based on device, falling back to fp32 dtype = DEFAULT_DEVICE_DTYPE_MAPPING[selectedDevice] ?? DATA_TYPES.fp32; } } const selectedDtype = /** @type {import("./utils/dtypes.js").DataType} */(dtype); if (!DEFAULT_DTYPE_SUFFIX_MAPPING.hasOwnProperty(selectedDtype)) { throw new Error(`Invalid dtype: ${selectedDtype}. Should be one of: ${Object.keys(DATA_TYPES).join(', ')}`); } else if (selectedDtype === DATA_TYPES.fp16 && selectedDevice === 'webgpu' && !(await isWebGpuFp16Supported())) { throw new Error(`The device (${selectedDevice}) does not support fp16.`); } // Only valid for models with a decoder const kv_cache_dtype = custom_config.kv_cache_dtype ? (typeof custom_config.kv_cache_dtype === 'string' ? custom_config.kv_cache_dtype : custom_config.kv_cache_dtype[selectedDtype] ?? 'float32') : undefined; if (kv_cache_dtype && !['float32', 'float16'].includes(kv_cache_dtype)) { throw new Error(`Invalid kv_cache_dtype: ${kv_cache_dtype}. Should be one of: float32, float16`); } const session_config = { dtype: selectedDtype, kv_cache_dtype, } // Construct the model file name const suffix = DEFAULT_DTYPE_SUFFIX_MAPPING[selectedDtype]; const modelFileName = `${options.subfolder ?? 
''}/${fileName}${suffix}.onnx`; const session_options = { ...options.session_options }; // Overwrite `executionProviders` if not specified session_options.executionProviders ??= executionProviders; // Overwrite `freeDimensionOverrides` if specified in config and not set in session options const free_dimension_overrides = custom_config.free_dimension_overrides; if (free_dimension_overrides) { session_options.freeDimensionOverrides ??= free_dimension_overrides; } else if (selectedDevice.startsWith('webnn') && !session_options.freeDimensionOverrides) { console.warn( 'WebNN does not currently support dynamic shapes and requires `free_dimension_overrides` to be set in config.json as a field within "transformers.js_config". ' + 'When `free_dimension_overrides` is not set, you may experience significant performance degradation.' ); } const bufferPromise = getModelFile(pretrained_model_name_or_path, modelFileName, true, options); // handle onnx external data files const use_external_data_format = options.use_external_data_format ?? custom_config.use_external_data_format; /** @type {Promise<{path: string, data: Uint8Array}>[]} */ let externalDataPromises = []; if (use_external_data_format && ( use_external_data_format === true || ( typeof use_external_data_format === 'object' && use_external_data_format.hasOwnProperty(fileName) && use_external_data_format[fileName] === true ) )) { if (apis.IS_NODE_ENV) { throw new Error('External data format is not yet supported in Node.js'); } const path = `${fileName}${suffix}.onnx_data`; const fullPath = `${options.subfolder ?? ''}/${path}`; externalDataPromises.push(new Promise(async (resolve, reject) => { const data = await getModelFile(pretrained_model_name_or_path, fullPath, true, options); resolve({ path, data }) })); } else if (session_options.externalData !== undefined) { externalDataPromises = session_options.externalData.map(async (ext) => { // if the external data is a string, fetch the file and replace the string with its content // @ts-expect-error TS2339 if (typeof ext.data === "string") { // @ts-expect-error TS2339 const ext_buffer = await getModelFile(pretrained_model_name_or_path, ext.data, true, options); // @ts-expect-error TS2698 return { ...ext, data: ext_buffer }; } return ext; }); } if (externalDataPromises.length > 0) { session_options.externalData = await Promise.all(externalDataPromises); } if (selectedDevice === 'webgpu') { const shapes = getKeyValueShapes(options.config, { prefix: 'present', }); if (Object.keys(shapes).length > 0 && !isONNXProxy()) { // Only set preferredOutputLocation if shapes are present and we aren't proxying ONNX /** @type {Record<string, import('onnxruntime-common').Tensor.DataLocation>} */ const preferredOutputLocation = {}; for (const key in shapes) { preferredOutputLocation[key] = 'gpu-buffer'; } session_options.preferredOutputLocation = preferredOutputLocation; } } const buffer = await bufferPromise; return { buffer, session_options, session_config }; } /** * Helper function to create multiple InferenceSession objects. * * @param {string} pretrained_model_name_or_path The path to the directory containing the model file. * @param {Record<string, string>} names The names of the model files to load. * @param {import('./utils/hub.js').PretrainedModelOptions} options Additional options for loading the model. * @returns {Promise<Record<string, any>>} A Promise that resolves to a dictionary of InferenceSession objects. 
* @private */ async function constructSessions(pretrained_model_name_or_path, names, options) { return Object.fromEntries(await Promise.all( Object.keys(names).map(async (name) => { const { buffer, session_options, session_config } = await getSession(pretrained_model_name_or_path, names[name], options); const session = await createInferenceSession(buffer, session_options, session_config); return [name, session]; }) )); } /** * Helper function to load multiple optional configuration files * @param {string} pretrained_model_name_or_path The path to the directory containing the config file. * @param {Record<string, string>} names The names of the config files to load. * @param {import('./utils/hub.js').PretrainedModelOptions} options Additional options for loading the configs. * @returns {Promise<Record<string, any>>} A Promise that resolves to a dictionary of configuration objects. * @private */ async function getOptionalConfigs(pretrained_model_name_or_path, names, options) { return Object.fromEntries(await Promise.all( Object.keys(names).map(async (name) => { const config = await getModelJSON(pretrained_model_name_or_path, names[name], false, options); return [name, config]; }) )); } /** * Validate model inputs * @param {Object} session The InferenceSession object that will be run. * @param {Object} inputs The inputs to check. * @returns {Record<string, Tensor>} The checked inputs. * @throws {Error} If any inputs are missing. * @private */ function validateInputs(session, inputs) { /** * NOTE: Create either a shallow or deep copy based on `onnx.wasm.proxy` * @type {Record<string, Tensor>} */ const checkedInputs = Object.create(null); const missingInputs = []; for (const inputName of session.inputNames) { const tensor = inputs[inputName]; // Rare case where one of the model's input names corresponds to a built-in // object name (e.g., toString), which would cause a simple (!tensor) check to fail, // because it's not undefined but a function. if (!(tensor instanceof Tensor)) { missingInputs.push(inputName); continue; } // NOTE: When `env.wasm.proxy is true` the tensor is moved across the Worker // boundary, transferring ownership to the worker and invalidating the tensor. // So, in this case, we simply sacrifice a clone for it. checkedInputs[inputName] = isONNXProxy() ? tensor.clone() : tensor; } if (missingInputs.length > 0) { throw new Error( `An error occurred during model execution: "Missing the following inputs: ${missingInputs.join(', ')}.`); } const numInputsProvided = Object.keys(inputs).length; const numInputsNeeded = session.inputNames.length; if (numInputsProvided > numInputsNeeded) { // No missing inputs, but too many inputs were provided. // Warn the user and ignore the extra inputs. let ignored = Object.keys(inputs).filter(inputName => !session.inputNames.includes(inputName)); console.warn(`WARNING: Too many inputs were provided (${numInputsProvided} > ${numInputsNeeded}). The following inputs will be ignored: "${ignored.join(', ')}".`); } return checkedInputs; } /** * Executes an InferenceSession using the specified inputs. * NOTE: `inputs` must contain at least the input names of the model. * - If additional inputs are passed, they will be ignored. * - If inputs are missing, an error will be thrown. * * @param {Object} session The InferenceSession object to run. * @param {Object} inputs An object that maps input names to input tensors. * @returns {Promise<Object>} A Promise that resolves to an object that maps output names to output tensors. 
* @private */ async function sessionRun(session, inputs) { const checkedInputs = validateInputs(session, inputs); try { // pass the original ort tensor const ortFeed = Object.fromEntries(Object.entries(checkedInputs).map(([k, v]) => [k, v.ort_tensor])); let output = await session.run(ortFeed); output = replaceTensors(output); return output; } catch (e) { // Error messages can be long (nested) and uninformative. For this reason, // we apply minor formatting to show the most important information const formatted = Object.fromEntries(Object.entries(checkedInputs) .map(([k, { type, dims, data }]) => [k, { // Extract these properties from the underlying ORT tensor type, dims, data, }])); // This usually occurs when the inputs are of the wrong type. console.error(`An error occurred during model execution: "${e}".`); console.error('Inputs given to model:', formatted); throw e; } } /** * Replaces ONNX Tensor objects with custom Tensor objects to support additional functions. * @param {Object} obj The object to replace tensor objects in. * @returns {Object} The object with tensor objects replaced by custom Tensor objects. * @private */ function replaceTensors(obj) { for (let prop in obj) { if (isONNXTensor(obj[prop])) { obj[prop] = new Tensor(obj[prop]); } else if (typeof obj[prop] === 'object') { replaceTensors(obj[prop]); } } return obj; } /** * Converts an array or Tensor of integers to an int64 Tensor. * @param {any[]|Tensor} items The input integers to be converted. * @returns {Tensor} The int64 Tensor with the converted values. * @throws {Error} If the input array is empty or the input is a batched Tensor and not all sequences have the same length. * @private */ function toI64Tensor(items) { if (items instanceof Tensor) { return items; } // items is an array if (items.length === 0) { throw Error("items must be non-empty"); } if (Array.isArray(items[0])) { // batched if (items.some(x => x.length !== items[0].length)) { throw Error("Unable to create tensor, you should probably activate truncation and/or padding with 'padding=True' and/or 'truncation=True' to have batched tensors with the same length.") } return new Tensor('int64', BigInt64Array.from(items.flat().map(x => BigInt(x))), [items.length, items[0].length] ); } else { //flat return new Tensor('int64', BigInt64Array.from(items.map(x => BigInt(x))), [1, items.length] ); } } /** * Creates a boolean tensor with a single value. * @param {boolean} value The value of the tensor. * @returns {Tensor} The boolean tensor. * @private */ function boolTensor(value) { return new Tensor('bool', [value], [1]); } // JS doesn't support mixins, so we define some reused functions here, and allow "this" to be passed in /** * Perform forward pass on the seq2seq model (both encoder and decoder). * @param {Object} self The seq2seq model object. * @param {Object} model_inputs The input object for the model containing encoder and decoder inputs. * @returns {Promise<Seq2SeqLMOutput>} Promise that resolves with the output of the seq2seq model. * @private */ async function seq2seqForward(self, model_inputs) { let { encoder_outputs, input_ids, decoder_input_ids, ...other_decoder_inputs } = model_inputs; // Encode if needed if (!encoder_outputs) { const encoder_inputs = pick(model_inputs, self.sessions['model'].inputNames); // Encoder outputs are not given, so we must compute them. 
encoder_outputs = (await encoderForward(self, encoder_inputs)).last_hidden_state; } other_decoder_inputs.input_ids = decoder_input_ids; other_decoder_inputs.encoder_hidden_states = encoder_outputs; if (self.sessions['decoder_model_merged'].inputNames.includes('encoder_attention_mask')) { other_decoder_inputs.encoder_attention_mask = model_inputs.attention_mask } const decoderResults = await decoderForward(self, other_decoder_inputs, true); return decoderResults; } /** * Forward pass of an encoder model. * @param {Object} self The encoder model. * @param {Object} model_inputs The input data to be used for the forward pass. * @returns {Promise<Object>} The model's outputs. * @private */ async function encoderForward(self, model_inputs) { const session = self.sessions['model']; const encoderFeeds = pick(model_inputs, session.inputNames); if (session.inputNames.includes('inputs_embeds') && !encoderFeeds.inputs_embeds) { if (!model_inputs.input_ids) { throw new Error('Both `input_ids` and `inputs_embeds` are missing in the model inputs.'); } encoderFeeds.inputs_embeds = await self.encode_text({ input_ids: model_inputs.input_ids }); } if (session.inputNames.includes('token_type_ids') && !encoderFeeds.token_type_ids) { if (!encoderFeeds.input_ids) { throw new Error('Both `input_ids` and `token_type_ids` are missing in the model inputs.'); } // Assign default `token_type_ids` (all zeroes) to the `encoderFeeds` if the model expects it, // but they weren't created by the tokenizer. encoderFeeds.token_type_ids = zeros_like(encoderFeeds.input_ids); } if (session.inputNames.includes('pixel_mask') && !encoderFeeds.pixel_mask) { if (!encoderFeeds.pixel_values) { throw new Error('Both `pixel_values` and `pixel_mask` are missing in the model inputs.'); } // Assign default `pixel_mask` (all ones) to the `encoderFeeds` if the model expects it, // but they weren't created by the processor. const dims = encoderFeeds.pixel_values.dims; encoderFeeds.pixel_mask = ones([dims[0], dims[2], dims[3]]); } return await sessionRun(session, encoderFeeds); } /** * Forward pass of a decoder model. * @param {Object} self The decoder model. * @param {Object} model_inputs The input data to be used for the forward pass. * @returns {Promise<Object>} The logits and past key values. * @private */ async function decoderForward(self, model_inputs, is_encoder_decoder = false) { const session = self.sessions[ is_encoder_decoder ? 'decoder_model_merged' : 'model' ] const { past_key_values, ...new_model_inputs } = model_inputs; if (session.inputNames.includes('use_cache_branch')) { new_model_inputs.use_cache_branch = boolTensor(!!past_key_values); } if (session.inputNames.includes('position_ids') && new_model_inputs.attention_mask && !new_model_inputs.position_ids) { // NOTE: Handle a special case for paligemma models, where positions are 1-indexed const start_index = self.config.model_type === 'paligemma' ? 
1 : 0; new_model_inputs.position_ids = createPositionIds(new_model_inputs, past_key_values, start_index); } // Unpack the `past_key_values` object into model inputs self.addPastKeyValues(new_model_inputs, past_key_values); // Select only the inputs that are needed for the current session const fixed = pick(new_model_inputs, session.inputNames); return await sessionRun(session, fixed); } function default_merge_input_ids_with_image_features({ image_token_id, inputs_embeds, image_features, input_ids, attention_mask, }) { const image_tokens = input_ids.tolist().map(ids => ids.reduce((acc, x, idx) => { if (x == image_token_id) acc.push(idx); return acc; }, []) ); const n_image_tokens = image_tokens.reduce((acc, x) => acc + x.length, 0); const n_image_features = image_features.dims[0]; if (n_image_tokens !== n_image_features) { throw new Error(`Image features and image tokens do not match: tokens: ${n_image_tokens}, features ${n_image_features}`); } // Equivalent to performing a masked_scatter let img = 0; for (let i = 0; i < image_tokens.length; ++i) { const tokens = image_tokens[i]; const embeds = inputs_embeds[i]; for (let j = 0; j < tokens.length; ++j) { embeds[tokens[j]].data.set(image_features[img++].data) } } return { inputs_embeds, attention_mask } } /** * Forward pass of an image-text-to-text model. * @param {Object} self The image-text-to-text model model. * @param {Object} model_inputs The input data to be used for the forward pass. * @param {Tensor} [model_inputs.input_ids=null] * @param {Tensor} [model_inputs.attention_mask=null] * @param {Tensor} [model_inputs.pixel_values=null] * @param {Tensor} [model_inputs.position_ids=null] * @param {Tensor} [model_inputs.inputs_embeds=null] * @param {Tensor} [model_inputs.past_key_values=null] * @param {Object} [model_inputs.generation_config=null] * @param {Object} [model_inputs.logits_processor=null] * @returns {Promise<Tensor>} The model's output tensor * @private */ async function imageTextToTextForward(self, { // Produced by the tokenizer/processor: input_ids = null, attention_mask = null, pixel_values = null, // Used during generation: position_ids = null, inputs_embeds = null, past_key_values = null, // Generic generation parameters generation_config = null, logits_processor = null, // TODO: needed? ...kwargs }) { if (!inputs_embeds) { // 1. Extract the input embeddings inputs_embeds = await self.encode_text({ input_ids, ...kwargs }); // 2. 
Possibly, merge text and images if (pixel_values && input_ids.dims[1] !== 1) { const image_features = await self.encode_image({ pixel_values, ...kwargs }); ({ inputs_embeds, attention_mask } = self._merge_input_ids_with_image_features({ image_features, inputs_embeds, input_ids, attention_mask, })); } else if (past_key_values && pixel_values && input_ids.dims[1] === 1) { // This is the case when we are generating with cache const target_length = input_ids.dims[1]; // always 1 const past_length = Object.values(past_key_values)[0].dims.at(-2); attention_mask = cat([ ones([input_ids.dims[0], past_length]), attention_mask.slice(null, [attention_mask.dims[1] - target_length, attention_mask.dims[1]]), ], 1); } } if (!position_ids) { if (self.config.model_type === 'qwen2_vl') { // Special case for qwen2_vl models // @ts-ignore const { image_grid_thw, video_grid_thw } = kwargs; [position_ids] = self.get_rope_index(input_ids, image_grid_thw, video_grid_thw, attention_mask) } } const outputs = await decoderForward(self, { inputs_embeds, past_key_values, attention_mask, position_ids, generation_config, logits_processor, }, true); return outputs; } /** * Helper function to perform the following: * ```python * x = attention_mask.long().cumsum(-1) - 1 * x.masked_fill_(attention_mask == 0, 1) * ``` * @param {Tensor} attention_mask * @returns {{data: BigInt64Array, dims: number[]}} */ function cumsum_masked_fill(attention_mask, start_index = 0) { const [bz, seq_len] = attention_mask.dims; const attn_mask_data = attention_mask.data; const data = new BigInt64Array(attn_mask_data.length); for (let i = 0; i < bz; ++i) { const start = i * seq_len; let sum = BigInt(start_index); for (let j = 0; j < seq_len; ++j) { const index = start + j; if (attn_mask_data[index] === 0n) { data[index] = BigInt(1); } else { // === 1n data[index] = sum; sum += attn_mask_data[index]; } } } return { data, dims: attention_mask.dims }; } /** * If the model supports providing position_ids, we create position_ids on the fly for batch generation, * by computing the cumulative sum of the attention mask along the sequence length dimension. * * Equivalent to: * ```python * position_ids = attention_mask.long().cumsum(-1) - 1 * position_ids.masked_fill_(attention_mask == 0, 1) * if past_key_values: * position_ids = position_ids[:, -input_ids.shape[1] :] * ``` */ function createPositionIds(model_inputs, past_key_values = null, start_index = 0) { const { input_ids, inputs_embeds, attention_mask } = model_inputs; const { data, dims } = cumsum_masked_fill(attention_mask, start_index); let position_ids = new Tensor('int64', data, dims); if (past_key_values) { const offset = -(input_ids ?? inputs_embeds).dims.at(1); position_ids = position_ids.slice(null, [offset, null]); } return position_ids; } function decoder_prepare_inputs_for_generation(self, input_ids, model_inputs, generation_config) { if (model_inputs.past_key_values) { const past_length = Object.values(model_inputs.past_key_values)[0].dims.at(-2); const { input_ids, attention_mask } = model_inputs; // Keep only the unprocessed tokens: // 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where // some of the inputs are exclusively passed as part of the cache (e.g. 
when passing input_embeds as // input) if (attention_mask && attention_mask.dims[1] > input_ids.dims[1]) { // NOTE: not needed since we only pass the generated tokens to the next forward pass // const offset = -(attention_mask.dims[1] - past_length); // model_inputs.input_ids = input_ids.slice(null, [offset, null]); } // 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. // We can discard input_ids based on the past_length. else if (past_length < input_ids.dims[1]) { // NOTE: Required for phi models. // See https://github.com/huggingface/transformers/issues/30809#issuecomment-2111918479 for more information. model_inputs.input_ids = input_ids.slice(null, [past_length, null]); } // 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens. else { if ( // NOTE: Only used by VLMs (!= so that null matches undefined) self.config.image_token_index != null && // Equivalent to `self.config.image_token_index in input_ids` (== so that int matches bigint) input_ids.data.some(x => x == self.config.image_token_index) ) { // TODO: Support multiple image tokens const num_image_tokens = self.config.num_image_tokens; if (!num_image_tokens) { throw new Error('`num_image_tokens` is missing in the model configuration.'); } const num_new_tokens = input_ids.dims[1] - (past_length - num_image_tokens); model_inputs.input_ids = input_ids.slice(null, [-num_new_tokens, null]); // TODO: The attention mask should be formed from the attention mask passed in model_inputs model_inputs.attention_mask = ones([1, past_length + num_new_tokens]); } } } return model_inputs; } function encoder_decoder_prepare_inputs_for_generation(self, input_ids, model_inputs, generation_config) { if (model_inputs.past_key_values) { input_ids = input_ids.map(x => [x.at(-1)]); } return { ...model_inputs, decoder_input_ids: toI64Tensor(input_ids), }; } function image_text_to_text_prepare_inputs_for_generation(self, ...args) { if (self.config.is_encoder_decoder) { return encoder_decoder_prepare_inputs_for_generation(self, ...args); } else { return decoder_prepare_inputs_for_generation(self, ...args); } } function multimodality_prepare_inputs_for_generation(self, input_ids, model_inputs, generation_config) { const has_past_key_values = !!model_inputs.past_key_values; if (generation_config.guidance_scale !== null && generation_config.guidance_scale > 1) { if (has_past_key_values) { model_inputs.input_ids = cat([ model_inputs.input_ids, model_inputs.input_ids, ], 0) // NOTE: attention_mask handled in generation } else { model_inputs.input_ids = cat([ model_inputs.input_ids, full_like(model_inputs.input_ids, BigInt(generation_config.pad_token_id)), ], 0); model_inputs.attention_mask = cat([ model_inputs.attention_mask, full_like(model_inputs.attention_mask, 0n), ], 0); } } if (has_past_key_values || !model_inputs.pixel_values) { model_inputs.pixel_values = full([0, 0, 3, 384, 384], 1.0); } if (has_past_key_values) { const num_img_tokens = 0; const num_text_tokens = 1; const has_image = num_img_tokens > 0 ? 
1 : 0; const batch_size = 1; model_inputs.images_seq_mask = new Tensor( 'bool', new Array(num_img_tokens + num_text_tokens).fill(true).fill(false, 0, num_text_tokens), [batch_size, num_img_tokens + num_text_tokens], ); model_inputs.images_emb_mask = new Tensor( 'bool', new Array(num_img_tokens).fill(!!has_image), [batch_size, 1, num_img_tokens], ); } return model_inputs; } ////////////////////////////////////////////////// ////////////////////////////////////////////////// /** * A base class for pre-trained models that provides the model configuration and an ONNX session. */ export class PreTrainedModel extends Callable { main_input_name = 'input_ids'; forward_params = ['input_ids', 'attention_mask']; /** * Creates a new instance of the `PreTrainedModel` class. * @param {import('./configs.js').PretrainedConfig} config The model configuration. * @param {Record<string, any>} sessions The inference sessions for the model. * @param {Record<string, Object>} configs Additional configuration files (e.g., generation_config.json). */ constructor(config, sessions, configs) { super(); this.config = config; this.sessions = sessions; this.configs = configs; const modelName = MODEL_CLASS_TO_NAME_MAPPING.get(this.constructor); const modelType = MODEL_TYPE_MAPPING.get(modelName); this.can_generate = false; this._forward = null; this._prepare_inputs_for_generation = null; switch (modelType) { case MODEL_TYPES.DecoderOnly: this.can_generate = true; this._forward = decoderForward; this._prepare_inputs_for_generation = decoder_prepare_inputs_for_generation; break; case MODEL_TYPES.Seq2Seq: case MODEL_TYPES.Vision2Seq: case MODEL_TYPES.Musicgen: this.can_generate = true; this._forward = seq2seqForward; this._prepare_inputs_for_generation = encoder_decoder_prepare_inputs_for_generation; break; case MODEL_TYPES.EncoderDecoder: this._forward = seq2seqForward; break; case MODEL_TYPES.ImageTextToText: this.can_generate = true; this._forward = imageTextToTextForward; this._prepare_inputs_for_generation = image_text_to_text_prepare_inputs_for_generation; break; case MODEL_TYPES.Phi3V: this.can_generate = true; this._prepare_inputs_for_generation = image_text_to_text_prepare_inputs_for_generation; break; case MODEL_TYPES.MultiModality: this.can_generate = true; this._prepare_inputs_for_generation = multimodality_prepare_inputs_for_generation; break; default: // should be MODEL_TYPES.EncoderOnly this._forward = encoderForward; break; } if (this.can_generate) { this.forward_params.push('past_key_values'); } /** @type {import('./configs.js').TransformersJSConfig} */ this.custom_config = this.config['transformers.js_config'] ?? {}; } /** * Disposes of all the ONNX sessions that were created during inference. * @returns {Promise<unknown[]>} An array of promises, one for each ONNX session that is being disposed. * @todo Use https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/FinalizationRegistry */ async dispose() { const promises = []; for (const session of Object.values(this.sessions)) { if (session?.handler?.dispose) { promises.push(session.handler.dispose()) } } return await Promise.all(promises); } /** * Instantiate one of the model classes of the library from a pretrained model. * * The model class to instantiate is selected based on the `model_type` property of the config object * (either passed as an argument or loaded from `pretrained_model_name_or_path` if possible) * * @param {string} pretrained_model_name_or_path The name or path of the pretrained model. 
Can be either: * - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. * Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a * user or organization name, like `dbmdz/bert-base-german-cased`. * - A path to a *directory* containing model weights, e.g., `./my_model_directory/`. * @param {import('./utils/hub.js').PretrainedModelOptions} options Additional options for loading the model. * * @returns {Promise<PreTrainedModel>} A new instance of the `PreTrainedModel` class. */ static async from_pretrained(pretrained_model_name_or_path, { progress_callback = null, config = null, cache_dir = null, local_files_only = false, revision = 'main', model_file_name = null, subfolder = 'onnx', device = null, dtype = null, use_external_data_format = null, session_options = {}, } = {}) { let options = { progress_callback, config, cache_dir, local_files_only, revision, model_file_name, subfolder, device, dtype, use_external_data_format, session_options, } const modelName = MODEL_CLASS_TO_NAME_MAPPING.get(this); const modelType = MODEL_TYPE_MAPPING.get(modelName); config = options.config = await AutoConfig.from_pretrained(pretrained_model_name_or_path, options); let info; if (modelType === MODEL_TYPES.DecoderOnly) { info = await Promise.all([ constructSessions(pretrained_model_name_or_path, { model: options.model_file_name ?? 'model', }, options), getOptionalConfigs(pretrained_model_name_or_path, { generation_config: 'generation_config.json', }, options), ]); } else if (modelType === MODEL_TYPES.Seq2Seq || modelType === MODEL_TYPES.Vision2Seq) { info = await Promise.all([ constructSessions(pretrained_model_name_or_path, { model: 'encoder_model', decoder_model_merged: 'decoder_model_merged', }, options), getOptionalConfigs(pretrained_model_name_or_path, { generation_config: 'generation_config.json', }, options), ]); } else if (modelType === MODEL_TYPES.MaskGeneration) { info = await Promise.all([ constructSessions(pretrained_model_name_or_path, { model: 'vision_encoder', prompt_encoder_mask_decoder: 'prompt_encoder_mask_decoder', }, options), ]); } else if (modelType === MODEL_TYPES.EncoderDecoder) { info = await Promise.all([ constructSessions(pretrained_model_name_or_path, { model: 'encoder_model', decoder_model_merged: 'decoder_model_merged', }, options), ]); } else if (modelType === MODEL_TYPES.ImageTextToText) { const sessions = { embed_tokens: 'embed_tokens', vision_encoder: 'vision_encoder', decoder_model_merged: 'decoder_model_merged', } if (config.is_encoder_decoder) { sessions['model'] = 'encoder_model'; } info = await Promise.all([ constructSessions(pretrained_model_name_or_path, sessions, options), getOptionalConfigs(pretrained_model_name_or_path, { generation_config: 'generation_config.json', }, options), ]); } else if (modelType === MODEL_TYPES.Musicgen) { info = await Promise.all([ constructSessions(pretrained_model_name_or_path, { model: 'text_encoder', decoder_model_merged: 'decoder_model_merged', encodec_decode: 'encodec_decode', }, options), getOptionalConfigs(pretrained_model_name_or_path, { generation_config: 'generation_config.json', }, options), ]); } else if (modelType === MODEL_TYPES.MultiModality) { info = await Promise.all([ constructSessions(pretrained_model_name_or_path, { prepare_inputs_embeds: 'prepare_inputs_embeds', model: 'language_model', lm_head: 'lm_head', gen_head: 'gen_head', gen_img_embeds: 'gen_img_embeds', image_decode: 'image_decode', }, options), 
getOptionalConfigs(pretrained_model_name_or_path, { generation_config: 'generation_config.json', }, options), ]); } else if (modelType === MODEL_TYPES.Phi3V) { info = await Promise.all([ constructSessions(pretrained_model_name_or_path, { prepare_inputs_embeds: 'prepare_inputs_embeds', model: 'model', vision_encoder: 'vision_encoder', }, options), getOptionalConfigs(pretrained_model_name_or_path, { generation_config: 'generation_config.json', }, options), ]); } else { // should be MODEL_TYPES.EncoderOnly if (modelType !== MODEL_TYPES.EncoderOnly) { const type = modelName ?? config?.model_type; if (type !== 'custom') { console.warn(`Model type for '${type}' not found, assuming encoder-only architecture. Please report this at ${GITHUB_ISSUE_URL}.`) } } info = await Promise.all([ constructSessions(pretrained_model_name_or_path, { model: options.model_file_name ?? 'model', }, options), ]); } // @ts-ignore return new this(config, ...info); } /** * Runs the model with the provided inputs * @param {Object} model_inputs Object containing input tensors * @returns {Promise<Object>} Object containing output tensors */ async _call(model_inputs) { return await this.forward(model_inputs); } /** * Forward method for a pretrained model. If not overridden by a subclass, the correct forward method * will be chosen based on the model type. * @param {Object} model_inputs The input data to the model in the format specified in the ONNX model. * @returns {Promise<Object>} The output data from the model in the format specified in the ONNX model. * @throws {Error} This method must be implemented in subclasses. */ async forward(model_inputs) { return await this._forward(this, model_inputs); } /** * Get the model's generation config, if it exists. * @returns {GenerationConfig|null} The model's generation config if it exists, otherwise `null`. */ get generation_config() { return this.configs?.generation_config ?? null; } /** * This function returns a [`LogitsProcessorList`] list object that contains all relevant [`LogitsWarper`] * instances used for multinomial sampling. * @param {GenerationConfig} generation_config The generation config. * @returns {LogitsProcessorList} generation_config */ _get_logits_warper(generation_config) { // instantiate warpers list const warpers = new LogitsProcessorList(); if (generation_config.temperature !== null && generation_config.temperature !== 1.0) { warpers.push(new TemperatureLogitsWarper(generation_config.temperature)); } if (generation_config.top_k !== null && generation_config.top_k !== 0) { // TODO: add min_tokens_to_keep warpers.push(new TopKLogitsWarper(generation_config.top_k)); } if (generation_config.top_p !== null && generation_config.top_p < 1.0) { // TODO: add min_tokens_to_keep warpers.push(new TopPLogitsWarper(generation_config.top_p)); } return warpers; } /** * @param {GenerationConfig} generation_config * @param {number} input_ids_seq_length The starting sequence length for the input ids. 
* @returns {LogitsProcessorList} * @private */ _get_logits_processor( generation_config, input_ids_seq_length, // encoder_input_ids, TODO // prefix_allowed_tokens_fn, TODO logits_processor = null ) { const processors = new LogitsProcessorList(); // if (generation_config.diversity_penalty !== null && generation_config.diversity_penalty > 0.0) { // processors.push(new HammingDiversityLogitsProcessor( // generation_config.diversity_penalty, // generation_config.num_beams, // generation_config.num_beam_groups // )); // } // if (generation_config.encoder_repetition_penalty !== null && generation_config.encoder_repetition_penalty !== 1.0) { // processors.push(new EncoderRepetitionPenaltyLogitsProcessor( // generation_config.encoder_repetition_penalty, // encoder_input_ids // )); // } if (generation_config.repetition_penalty !== null && generation_config.repetition_penalty !== 1.0) { processors.push(new RepetitionPenaltyLogitsProcessor(generation_config.repetition_penalty)); } if (generation_config.no_repeat_ngram_size !== null && generation_config.no_repeat_ngram_size > 0) { processors.push(new NoRepeatNGramLogitsProcessor(generation_config.no_repeat_ngram_size)); } // if (generation_config.encoder_no_repeat_ngram_size !== null && generation_config.encoder_no_repeat_ngram_size > 0) { // if (this.config.is_encoder_decoder) { // processors.push(new EncoderNoRepeatNGramLogitsProcessor( // generation_config.encoder_no_repeat_ngram_size, // encoder_input_ids // )); // } else { // throw new Error("It's impossible to use `encoder_no_repeat_ngram_size` with decoder-only architecture"); // } // } if (generation_config.bad_words_ids !== null) { processors.push(new NoBadWordsLogitsProcessor(generation_config.bad_words_ids, generation_config.eos_token_id)); } if (generation_config.min_length !== null && generation_config.eos_token_id !== null && generation_config.min_length > 0) { processors.push(new MinLengthLogitsProcessor(generation_config.min_length, generation_config.eos_token_id)); } if (generation_config.min_new_tokens !== null && generation_config.eos_token_id !== null && generation_config.min_new_tokens > 0) { processors.push(new MinNewTokensLengthLogitsProcessor( input_ids_seq_length, generation_config.min_new_tokens, generation_config.eos_token_id )); } // if (prefix_allowed_tokens_fn !== null) { // processors.push(new PrefixConstrainedLogitsProcessor( // prefix_allowed_tokens_fn, // generation_config.num_beams / generation_config.num_beam_groups // )); // } if (generation_config.forced_bos_token_id !== null) { processors.push(new ForcedBOSTokenLogitsProcessor(generation_config.forced_bos_token_id)); } if (generation_config.forced_eos_token_id !== null) { processors.push(new ForcedEOSTokenLogitsProcessor( generation_config.max_length, generation_config.forced_eos_token_id )); } // if (generation_config.remove_invalid_values === true) { // processors.push(new InfNanRemoveLogitsProcessor()); // } // if (generation_config.exponential_decay_length_penalty !== null) { // processors.push(new ExponentialDecayLengthPenalty( // generation_config.exponential_decay_length_penalty, // generation_config.eos_token_id, // input_ids_seq_length // )); // } // if (generation_config.suppress_tokens !== null) { // processors.push(new SuppressTokensLogitsProcessor(generation_config.suppress_tokens)); // } if (generation_config.begin_suppress_tokens !== null) { const begin_index = (input_ids_seq_length > 1 || generation_config.forced_bos_token_id === null) ? 
input_ids_seq_length : input_ids_seq_length + 1; processors.push(new SuppressTokensAtBeginLogitsProcessor(generation_config.begin_suppress_tokens, begin_index)); } // DEPRECATED: https://github.com/huggingface/transformers/pull/29485 // if (generation_config.forced_decoder_ids !== null) { // processors.push(new ForceTokensLogitsProcessor(generation_config.forced_decoder_ids)); // } // 8. prepare batched CFG externally if (generation_config.guidance_scale !== null && generation_config.guidance_scale > 1) { processors.push(new ClassifierFreeGuidanceLogitsProcessor(generation_config.guidance_scale)); } if (logits_processor !== null) { processors.extend(logits_processor) } // `LogitNormalization` should always be the last logit processor, when present // if (generation_config.renormalize_logits === true) { // processors.push(new LogitNormalization()); // } return processors; } /** * This function merges multiple generation configs together to form a final generation config to be used by the model for text generation. * It first creates an empty `GenerationConfig` object, then it applies the model's own `generation_config` property to it. Finally, if a `generation_config` object was passed in the arguments, it overwrites the corresponding properties in the final config with those of the passed config object. * @param {GenerationConfig|null} generation_config A `GenerationConfig` object containing generation parameters. * @param {Object} kwargs Additional generation parameters to be used in place of those in the `generation_config` object. * @returns {GenerationConfig} The final generation config object to be used by the model for text generation. */ _prepare_generation_config(generation_config, kwargs, cls = GenerationConfig) { // Create empty generation config (contains defaults) // We pass `this.config` so that if `eos_token_id` or `bos_token_id` exist in the model's config, we will use them const config = { ...this.config }; for (const key of ["decoder", "generator", "text_config"]) { // Special case: some models have generation attributes set in the decoder. // Use them if still unset in the generation config. if (key in config) { Object.assign(config, config[key]); } } const gen_config = new cls(config); // Apply model's generation config, if it exists Object.assign(gen_config, this.generation_config ?? {}); // Next, use any generation config specified by the user // when calling `generate` if (generation_config) { Object.assign(gen_config, generation_config); } // Finally, if any kwargs were passed, use them to overwrite if (kwargs) { Object.assign(gen_config, pick(kwargs, Object.getOwnPropertyNames(gen_config))); } return gen_config; } /** * * @param {GenerationConfig} generation_config * @param {StoppingCriteriaList} [stopping_criteria=null] */ _get_stopping_criteria(generation_config, stopping_criteria = null) { const criteria = new StoppingCriteriaList(); if (generation_config.max_length !== null) { criteria.push(new MaxLengthCriteria( generation_config.max_length, this.config.max_position_embeddings ?? null, )); } // if (generation_config.max_time !== null) { // criteria.push(new MaxTimeCriteria(generation_config.max_time)); // } if (generation_config.eos_token_id !== null) { criteria.push(new EosTokenCriteria(generation_config.eos_token_id)); } if (stopping_criteria) { criteria.extend(stopping_criteria); } return criteria; } /** * Confirms that the model class is compatible with generation. * If not, raises an exception that points to the right class to use. 
*/ _validate_model_class() { if (!this.can_generate) { const generate_compatible_mappings = [ MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, // MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING, // TODO MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES, ]; const modelName = MODEL_CLASS_TO_NAME_MAPPING.get(this.constructor); const generate_compatible_classes = new Set(); const modelType = this.config.model_type; for (const model_mapping of generate_compatible_mappings) { const supported_models = model_mapping.get(modelType); if (supported_models) { generate_compatible_classes.add(supported_models[0]); } } let errorMessage = `The current model class (${modelName}) is not compatible with \`.generate()\`, as it doesn't have a language model head.` if (generate_compatible_classes.size > 0) { errorMessage += ` Please use the following class instead: ${[...generate_compatible_classes].join(', ')}`; } throw Error(errorMessage); } } prepare_inputs_for_generation(...args) { return this._prepare_inputs_for_generation(this, ...args); } /** * * @param {Object} inputs * @param {bigint[][]} inputs.generated_input_ids * @param {Object} inputs.outputs * @param {Object} inputs.model_inputs * @param {boolean} inputs.is_encoder_decoder * @returns {Object} The updated model inputs for the next generation iteration. */ _update_model_kwargs_for_generation({ generated_input_ids, outputs, model_inputs, is_encoder_decoder }) { // update past_key_values model_inputs['past_key_values'] = this.getPastKeyValues(outputs, model_inputs.past_key_values); // update inputs for next run model_inputs['input_ids'] = new Tensor('int64', generated_input_ids.flat(), [generated_input_ids.length, 1]); if (!is_encoder_decoder) { // update attention mask model_inputs.attention_mask = cat( [ model_inputs.attention_mask, ones([model_inputs.attention_mask.dims[0], 1]), ], 1 ); } else if ('decoder_attention_mask' in model_inputs) { // TODO: update decoder attention mask if the model requires it } // force recreate position_ids in next iteration model_inputs['position_ids'] = null; return model_inputs; } /** * This function extracts the model-specific `inputs` for generation. * @param {Object} params * @param {Tensor} [params.inputs=null] * @param {number} [params.bos_token_id=null] * @param {Record<string, Tensor|number[]>} [params.model_kwargs] * @returns {{inputs_tensor: Tensor, model_inputs: Record<string, Tensor>, model_input_name: string}} The model-specific inputs for generation. */ _prepare_model_inputs({ inputs, bos_token_id, model_kwargs }) { const model_inputs = pick(model_kwargs, this.forward_params); const input_name = this.main_input_name; if (input_name in model_inputs) { if (inputs) { throw new Error( `\`inputs\` were passed alongside \`${input_name}\`, which is not allowed. ` + `Make sure to pass either \`inputs\` or \`${input_name}\`.`
); } } else { model_inputs[input_name] = inputs; } const inputs_tensor = model_inputs[input_name]; return { inputs_tensor, model_inputs, model_input_name: input_name }; } async _prepare_encoder_decoder_kwargs_for_generation({ inputs_tensor, model_inputs, model_input_name, generation_config }) { if ( this.sessions['model'].inputNames.includes('inputs_embeds') && !model_inputs.inputs_embeds && '_prepare_inputs_embeds' in this ) { // Encoder expects `inputs_embeds` instead of `input_ids` const { input_ids, pixel_values, attention_mask, ...kwargs } = model_inputs; // @ts-ignore const prepared_inputs = await this._prepare_inputs_embeds(model_inputs); model_inputs = { ...kwargs, ...pick(prepared_inputs, ['inputs_embeds', 'attention_mask']), }; } let { last_hidden_state } = await encoderForward(this, model_inputs); // for classifier free guidance we need to add a 'null' input to our encoder hidden states if (generation_config.guidance_scale !== null && generation_config.guidance_scale > 1) { last_hidden_state = cat([ last_hidden_state, full_like(last_hidden_state, 0.0), ], 0); if ('attention_mask' in model_inputs) { model_inputs['attention_mask'] = cat([ model_inputs['attention_mask'], zeros_like(model_inputs['attention_mask']), ], 0); } } else if (model_inputs.decoder_input_ids) { // Ensure that the encoder outputs have the same batch size as the decoder inputs, // allowing for more efficient batched generation for single inputs const decoder_input_ids_batch_size = toI64Tensor(model_inputs.decoder_input_ids).dims[0]; if (decoder_input_ids_batch_size !== last_hidden_state.dims[0]) { if (last_hidden_state.dims[0] !== 1) { throw new Error( `The encoder outputs have a different batch size (${last_hidden_state.dims[0]}) than the decoder inputs (${decoder_input_ids_batch_size}).` ) } last_hidden_state = cat(Array.from({ length: decoder_input_ids_batch_size }, () => last_hidden_state), 0); } } model_inputs['encoder_outputs'] = last_hidden_state; return model_inputs; } /** * Prepares `decoder_input_ids` for generation with encoder-decoder models * @param {*} param0 */ _prepare_decoder_input_ids_for_generation({ batch_size, model_input_name, model_kwargs, decoder_start_token_id, bos_token_id, generation_config }) { let { decoder_input_ids, ...model_inputs } = model_kwargs; // Prepare input ids if the user has not defined `decoder_input_ids` manually. if (!(decoder_input_ids instanceof Tensor)) { if (!decoder_input_ids) { decoder_start_token_id ??= bos_token_id; if (this.config.model_type === 'musicgen') { // Custom logic (TODO: move to Musicgen class) decoder_input_ids = Array.from({ // @ts-expect-error TS2339 length: batch_size * this.config.decoder.num_codebooks }, () => [decoder_start_token_id]); } else if (Array.isArray(decoder_start_token_id)) { if (decoder_start_token_id.length !== batch_size) { throw new Error( `\`decoder_start_token_id\` expected to have length ${batch_size} but got ${decoder_start_token_id.length}` ) } decoder_input_ids = decoder_start_token_id; } else { decoder_input_ids = Array.from({ length: batch_size, }, () => [decoder_start_token_id]); } } else if (!Array.isArray(decoder_input_ids[0])) { // Correct batch size decoder_input_ids = Array.from({ length: batch_size, }, () => decoder_input_ids); } decoder_input_ids = toI64Tensor(decoder_input_ids); } model_kwargs['decoder_attention_mask'] = ones_like(decoder_input_ids); return { input_ids: decoder_input_ids, model_inputs }; } /** * Generates sequences of token ids for models with a language modeling head. 
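 *
 * **Example:** Autoregressive text generation with a causal language model (a minimal sketch; the model ID
 * is a placeholder and the package import path may differ depending on the installed version).
 * ```javascript
 * import { AutoTokenizer, AutoModelForCausalLM } from '@huggingface/transformers';
 *
 * const model_id = '<causal-lm-model-id>'; // placeholder model ID
 * const tokenizer = await AutoTokenizer.from_pretrained(model_id);
 * const model = await AutoModelForCausalLM.from_pretrained(model_id);
 *
 * const inputs = await tokenizer('Once upon a time,');
 * const output_ids = await model.generate({ ...inputs, max_new_tokens: 32 });
 * console.log(tokenizer.batch_decode(output_ids, { skip_special_tokens: true }));
 * ```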
* @param {import('./generation/parameters.js').GenerationFunctionParameters} options * @returns {Promise<ModelOutput|Tensor>} The output of the model, which can contain the generated token ids, attentions, and scores. */ async generate({ inputs = null, generation_config = null, logits_processor = null, stopping_criteria = null, streamer = null, // inputs_attention_mask = null, ...kwargs }) { this._validate_model_class(); // Update generation config with defaults and kwargs generation_config = this._prepare_generation_config(generation_config, kwargs); // 3. Define model inputs let { inputs_tensor, model_inputs, model_input_name } = this._prepare_model_inputs({ inputs, model_kwargs: kwargs, }); const is_encoder_decoder = this.config.is_encoder_decoder; // 4. Define other model kwargs if (!is_encoder_decoder) { // decoder-only models should use left-padding for generation } else if (!('encoder_outputs' in model_inputs)) { // if model is encoder decoder encoder_outputs are created // and added to `model_kwargs` model_inputs = await this._prepare_encoder_decoder_kwargs_for_generation( { inputs_tensor, model_inputs, model_input_name, generation_config } ) } // 5. Prepare `input_ids` which will be used for auto-regressive generation // TODO: Update to align with HF transformers' implementation let input_ids; if (is_encoder_decoder) { // Generating from the encoder outputs ({ input_ids, model_inputs } = this._prepare_decoder_input_ids_for_generation({ batch_size: model_inputs[model_input_name].dims.at(0), model_input_name, model_kwargs: model_inputs, decoder_start_token_id: generation_config.decoder_start_token_id, bos_token_id: generation_config.bos_token_id, generation_config, })); } else { input_ids = model_inputs[model_input_name] } // 6. Prepare `max_length` depending on other stopping criteria. let input_ids_length = input_ids.dims.at(-1); if (generation_config.max_new_tokens !== null) { generation_config.max_length = input_ids_length + generation_config.max_new_tokens; } // input_ids_length = model_inputs[model_input_name].dims.at(1); // // inputs instanceof Tensor ? : inputs.length; // // decoder-only // if (input_ids_length === 0) { // throw Error("Must supply a non-empty array of input token ids.") // } // let decoder_input_ids = // generation_config.decoder_input_ids // ?? generation_config.decoder_start_token_id // ?? generation_config.bos_token_id // ?? generation_config.eos_token_id; // Update logits processor // 8. prepare distribution pre_processing samplers const prepared_logits_processor = this._get_logits_processor( generation_config, input_ids_length, logits_processor, ) // 9. 
prepare stopping criteria const prepared_stopping_criteria = this._get_stopping_criteria( generation_config, stopping_criteria ) // /** @type {number[]} */ // let eos_token_ids = generation_config.eos_token_id; // if (eos_token_ids !== null && !Array.isArray(eos_token_ids)) { // eos_token_ids = [eos_token_ids]; // } const numInputs = model_inputs[model_input_name].dims.at(0); // TODO: // done is a list of booleans to keep track of which inputs are done // const done = new Array(numInputs).fill(false); // For efficiency purposes, we remove completed rows from model_inputs // when the beam is complete, and we keep track of the row index // const rowIndexToBatchIndex = new Map(); const sampler = LogitsSampler.getSampler(generation_config); // TODO make > numInputs const scores = new Array(numInputs).fill(0); /** @type {bigint[][]} */ const all_input_ids = input_ids.tolist(); if (streamer) { streamer.put(all_input_ids); } // const all_generated_input_ids = Array.from({ length: numInputs }, () => []); // NOTE: For now, we don't support spawning new beams // TODO: when we do, we simply copy past key values and accumulate into single large tensor //////////////////////////////////////////////////// // Generic search which handles 4 generation modes: // - GenerationMode.GREEDY_SEARCH // - GenerationMode.SAMPLE // - GenerationMode.BEAM_SEARCH // - GenerationMode.BEAM_SAMPLE //////////////////////////////////////////////////// let outputs; let attentions = {}; while (true) { // prepare model inputs model_inputs = this.prepare_inputs_for_generation(all_input_ids, model_inputs, generation_config); outputs = await this.forward(model_inputs); if (generation_config.output_attentions && generation_config.return_dict_in_generate) { // Get attentions if they are present const token_attentions = this.getAttentions(outputs); for (const key in token_attentions) { if (!(key in attentions)) { attentions[key] = []; } attentions[key].push(token_attentions[key]); } } // Logits are of the form [batch_size, out_seq_length, vocab_size] // In most cases, this will be [batch_size, 1, vocab_size] // So, we select the last token's logits: // (equivalent to `logits = outputs.logits[:, -1, :]`) const logits = outputs.logits.slice(null, -1, null); const next_tokens_scores = prepared_logits_processor(all_input_ids, logits); /** @type {[bigint][]} */ const generated_input_ids = []; // const new_kv_cache = [];// NOTE: Only used for beam search when concatenating new kv // Loop over each batch for (let batch_idx = 0; batch_idx < next_tokens_scores.dims.at(0); ++batch_idx) { const logs = next_tokens_scores[batch_idx]; const sampledTokens = await sampler(logs); for (const [newTokenId, logProb] of sampledTokens) { const bigint = BigInt(newTokenId); // TODO: If branching, use previous beam as a starting point // update generated ids, model inputs, and length for next step scores[batch_idx] += logProb; all_input_ids[batch_idx].push(bigint); generated_input_ids.push([bigint]); // TODO: Support beam search break; } } if (streamer) { streamer.put(generated_input_ids); } const stop = prepared_stopping_criteria(all_input_ids); if (stop.every(x => x)) { break; } model_inputs = this._update_model_kwargs_for_generation({ generated_input_ids, outputs, model_inputs, is_encoder_decoder, }); } if (streamer) { streamer.end(); } // Retrieve and dispose all final past key values (including encoder attentions) const past_key_values = this.getPastKeyValues(outputs, model_inputs.past_key_values, true); // TODO: ensure all_input_ids is padded 
correctly... const sequences = new Tensor('int64', all_input_ids.flat(), [all_input_ids.length, all_input_ids[0].length]); if (generation_config.return_dict_in_generate) { return { sequences, past_key_values, ...attentions, // TODO: // scores, // logits, } } else { // Dispose all remaining tensors for (const tensor of Object.values(outputs)) { if (tensor.location === 'gpu-buffer') { tensor.dispose(); } } return sequences; } } /** * Returns an object containing past key values from the given decoder results object. * * @param {Object} decoderResults The decoder results object. * @param {Object} pastKeyValues The previous past key values. * @returns {Object} An object containing past key values. */ getPastKeyValues(decoderResults, pastKeyValues, disposeEncoderPKVs = false) { const pkvs = Object.create(null); for (const name in decoderResults) { if (name.startsWith('present')) { const newName = name.replace('present', 'past_key_values'); const is_encoder_pkv = name.includes('encoder'); if (is_encoder_pkv && pastKeyValues) { // Optimization introduced by optimum to reuse past key values. // So, we just replace the constant outputs (`decoderResults[name]`) with the previous past key values. // https://github.com/huggingface/optimum/blob/0bf2c05fb7e1182b52d21b703cfc95fd9e4ea3dc/optimum/onnxruntime/base.py#L677-L704 pkvs[newName] = pastKeyValues[newName]; } else { // decoder or using first encoder PKVs pkvs[newName] = decoderResults[name]; } if (pastKeyValues && (!is_encoder_pkv || disposeEncoderPKVs)) { // - Always dispose decoder PKVs // - Only dispose encoder past key values when requested (after generation) const t = pastKeyValues[newName]; if (t.location === 'gpu-buffer') { t.dispose(); } } } } return pkvs; } /** * Returns an object containing attentions from the given model output object. * * @param {Object} model_output The output of the model. * @returns {{cross_attentions?: Tensor[]}} An object containing attentions. */ getAttentions(model_output) { const attentions = {}; for (const attnName of ['cross_attentions', 'encoder_attentions', 'decoder_attentions']) { for (const name in model_output) { if (name.startsWith(attnName)) { if (!(attnName in attentions)) { attentions[attnName] = []; } attentions[attnName].push(model_output[name]); } } } return attentions; } /** * Adds past key values to the decoder feeds object. If pastKeyValues is null, creates new tensors for past key values. * * @param {Object} decoderFeeds The decoder feeds object to add past key values to. * @param {Object} pastKeyValues An object containing past key values. */ addPastKeyValues(decoderFeeds, pastKeyValues) { if (pastKeyValues) { Object.assign(decoderFeeds, pastKeyValues) } else { const session = this.sessions['decoder_model_merged'] ?? this.sessions['model']; const dtype = session?.config?.kv_cache_dtype ?? 'float32'; const empty = (dtype === 'float16') ? new Uint16Array() : []; const batch_size = (decoderFeeds[this.main_input_name] ?? decoderFeeds.attention_mask)?.dims?.[0] ?? 1; const shapes = getKeyValueShapes(this.config, { batch_size }); for (const name in shapes) { decoderFeeds[name] = new Tensor(dtype, empty, shapes[name]); } } } async encode_image({ pixel_values }) { // image_inputs === { pixel_values } const features = (await sessionRun(this.sessions['vision_encoder'], { pixel_values })).image_features; // @ts-expect-error TS2339 if (!this.config.num_image_tokens) { console.warn( 'The number of image tokens was not set in the model configuration. 
' + `Setting it to the number of features detected by the vision encoder (${features.dims[1]}).` ) // @ts-expect-error TS2339 this.config.num_image_tokens = features.dims[1]; } return features; } async encode_text({ input_ids }) { // text_inputs === { input_ids, attention_mask } return (await sessionRun(this.sessions['embed_tokens'], { input_ids })).inputs_embeds; } } ////////////////////////////////////////////////// // Base model output class export class ModelOutput { } /** * Base class for model's outputs, with potential hidden states and attentions. */ export class BaseModelOutput extends ModelOutput { /** * @param {Object} output The output of the model. * @param {Tensor} output.last_hidden_state Sequence of hidden-states at the output of the last layer of the model. * @param {Tensor} [output.hidden_states] Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. * @param {Tensor} [output.attentions] Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. */ constructor({ last_hidden_state, hidden_states = null, attentions = null }) { super(); this.last_hidden_state = last_hidden_state; this.hidden_states = hidden_states; this.attentions = attentions; } } ////////////////////////////////////////////////// // Bert models export class BertPreTrainedModel extends PreTrainedModel { } export class BertModel extends BertPreTrainedModel { } /** * BertForMaskedLM is a class representing a BERT model for masked language modeling. */ export class BertForMaskedLM extends BertPreTrainedModel { /** * Calls the model on new inputs. * * @param {Object} model_inputs The inputs to the model. * @returns {Promise<MaskedLMOutput>} An object containing the model's output logits for masked language modeling. */ async _call(model_inputs) { return new MaskedLMOutput(await super._call(model_inputs)); } } /** * BertForSequenceClassification is a class representing a BERT model for sequence classification. */ export class BertForSequenceClassification extends BertPreTrainedModel { /** * Calls the model on new inputs. * * @param {Object} model_inputs The inputs to the model. * @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification. */ async _call(model_inputs) { return new SequenceClassifierOutput(await super._call(model_inputs)); } } /** * BertForTokenClassification is a class representing a BERT model for token classification. */ export class BertForTokenClassification extends BertPreTrainedModel { /** * Calls the model on new inputs. * * @param {Object} model_inputs The inputs to the model. * @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification. */ async _call(model_inputs) { return new TokenClassifierOutput(await super._call(model_inputs)); } } /** * BertForQuestionAnswering is a class representing a BERT model for question answering. */ export class BertForQuestionAnswering extends BertPreTrainedModel { /** * Calls the model on new inputs. * * @param {Object} model_inputs The inputs to the model. * @returns {Promise<QuestionAnsweringModelOutput>} An object containing the model's output logits for question answering. 
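 *
 * **Example:** Extractive question answering via the `question-answering` pipeline (an illustrative sketch;
 * the model ID is a placeholder).
 * ```javascript
 * import { pipeline } from '@huggingface/transformers';
 *
 * const answerer = await pipeline('question-answering', '<bert-squad-model-id>'); // placeholder model ID
 * const output = await answerer(
 *     'Who wrote the play?',
 *     'The play was written by William Shakespeare in the early 1600s.',
 * );
 * // e.g. { answer: 'William Shakespeare', score: ... }
 * ```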
*/ async _call(model_inputs) { return new QuestionAnsweringModelOutput(await super._call(model_inputs)); } } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // ModernBert models export class ModernBertPreTrainedModel extends PreTrainedModel { } export class ModernBertModel extends ModernBertPreTrainedModel { } export class ModernBertForMaskedLM extends ModernBertPreTrainedModel { /** * Calls the model on new inputs. * * @param {Object} model_inputs The inputs to the model. * @returns {Promise<MaskedLMOutput>} An object containing the model's output logits for masked language modeling. */ async _call(model_inputs) { return new MaskedLMOutput(await super._call(model_inputs)); } } export class ModernBertForSequenceClassification extends ModernBertPreTrainedModel { /** * Calls the model on new inputs. * * @param {Object} model_inputs The inputs to the model. * @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification. */ async _call(model_inputs) { return new SequenceClassifierOutput(await super._call(model_inputs)); } } export class ModernBertForTokenClassification extends ModernBertPreTrainedModel { /** * Calls the model on new inputs. * * @param {Object} model_inputs The inputs to the model. * @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification. */ async _call(model_inputs) { return new TokenClassifierOutput(await super._call(model_inputs)); } } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // NomicBert models export class NomicBertPreTrainedModel extends PreTrainedModel { } export class NomicBertModel extends NomicBertPreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // RoFormer models export class RoFormerPreTrainedModel extends PreTrainedModel { } /** * The bare RoFormer Model transformer outputting raw hidden-states without any specific head on top. */ export class RoFormerModel extends RoFormerPreTrainedModel { } /** * RoFormer Model with a `language modeling` head on top. */ export class RoFormerForMaskedLM extends RoFormerPreTrainedModel { /** * Calls the model on new inputs. * * @param {Object} model_inputs The inputs to the model. * @returns {Promise<MaskedLMOutput>} An object containing the model's output logits for masked language modeling. */ async _call(model_inputs) { return new MaskedLMOutput(await super._call(model_inputs)); } } /** * RoFormer Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) */ export class RoFormerForSequenceClassification extends RoFormerPreTrainedModel { /** * Calls the model on new inputs. * * @param {Object} model_inputs The inputs to the model. * @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification. */ async _call(model_inputs) { return new SequenceClassifierOutput(await super._call(model_inputs)); } } /** * RoFormer Model with a token classification head on top (a linear layer on top of the hidden-states output) * e.g. for Named-Entity-Recognition (NER) tasks. */ export class RoFormerForTokenClassification extends RoFormerPreTrainedModel { /** * Calls the model on new inputs. * * @param {Object} model_inputs The inputs to the model. 
* @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification. */ async _call(model_inputs) { return new TokenClassifierOutput(await super._call(model_inputs)); } } /** * RoFormer Model with a span classification head on top for extractive question-answering tasks like SQuAD * (linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). */ export class RoFormerForQuestionAnswering extends RoFormerPreTrainedModel { /** * Calls the model on new inputs. * * @param {Object} model_inputs The inputs to the model. * @returns {Promise<QuestionAnsweringModelOutput>} An object containing the model's output logits for question answering. */ async _call(model_inputs) { return new QuestionAnsweringModelOutput(await super._call(model_inputs)); } } // TODO: Add RoFormerForCausalLM and RoFormerForMultipleChoice ////////////////////////////////////////////////// ////////////////////////////////////////////////// // ConvBert models export class ConvBertPreTrainedModel extends PreTrainedModel { } /** * The bare ConvBERT Model transformer outputting raw hidden-states without any specific head on top. */ export class ConvBertModel extends ConvBertPreTrainedModel { } /** * ConvBERT Model with a language modeling head on top. */ export class ConvBertForMaskedLM extends ConvBertPreTrainedModel { /** * Calls the model on new inputs. * * @param {Object} model_inputs The inputs to the model. * @returns {Promise<MaskedLMOutput>} An object containing the model's output logits for masked language modeling. */ async _call(model_inputs) { return new MaskedLMOutput(await super._call(model_inputs)); } } /** * ConvBERT Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) */ export class ConvBertForSequenceClassification extends ConvBertPreTrainedModel { /** * Calls the model on new inputs. * * @param {Object} model_inputs The inputs to the model. * @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification. */ async _call(model_inputs) { return new SequenceClassifierOutput(await super._call(model_inputs)); } } /** * ConvBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) * e.g. for Named-Entity-Recognition (NER) tasks. */ export class ConvBertForTokenClassification extends ConvBertPreTrainedModel { /** * Calls the model on new inputs. * * @param {Object} model_inputs The inputs to the model. * @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification. */ async _call(model_inputs) { return new TokenClassifierOutput(await super._call(model_inputs)); } } /** * ConvBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD * (linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`) */ export class ConvBertForQuestionAnswering extends ConvBertPreTrainedModel { /** * Calls the model on new inputs. * * @param {Object} model_inputs The inputs to the model. * @returns {Promise<QuestionAnsweringModelOutput>} An object containing the model's output logits for question answering. 
*/ async _call(model_inputs) { return new QuestionAnsweringModelOutput(await super._call(model_inputs)); } } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // Electra models export class ElectraPreTrainedModel extends PreTrainedModel { } /** * The bare Electra Model transformer outputting raw hidden-states without any specific head on top. * Identical to the BERT model except that it uses an additional linear layer between the embedding * layer and the encoder if the hidden size and embedding size are different. */ export class ElectraModel extends ElectraPreTrainedModel { } // TODO add ElectraForPreTraining /** * Electra model with a language modeling head on top. */ export class ElectraForMaskedLM extends ElectraPreTrainedModel { /** * Calls the model on new inputs. * * @param {Object} model_inputs The inputs to the model. * @returns {Promise<MaskedLMOutput>} An object containing the model's output logits for masked language modeling. */ async _call(model_inputs) { return new MaskedLMOutput(await super._call(model_inputs)); } } /** * ELECTRA Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) */ export class ElectraForSequenceClassification extends ElectraPreTrainedModel { /** * Calls the model on new inputs. * * @param {Object} model_inputs The inputs to the model. * @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification. */ async _call(model_inputs) { return new SequenceClassifierOutput(await super._call(model_inputs)); } } /** * Electra model with a token classification head on top. */ export class ElectraForTokenClassification extends ElectraPreTrainedModel { /** * Calls the model on new inputs. * * @param {Object} model_inputs The inputs to the model. * @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification. */ async _call(model_inputs) { return new TokenClassifierOutput(await super._call(model_inputs)); } } /** * ELECTRA Model with a span classification head on top for extractive question-answering tasks like SQuAD * (linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). */ export class ElectraForQuestionAnswering extends ElectraPreTrainedModel { /** * Calls the model on new inputs. * * @param {Object} model_inputs The inputs to the model. * @returns {Promise<QuestionAnsweringModelOutput>} An object containing the model's output logits for question answering. */ async _call(model_inputs) { return new QuestionAnsweringModelOutput(await super._call(model_inputs)); } } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // CamemBERT models export class CamembertPreTrainedModel extends PreTrainedModel { } /** * The bare CamemBERT Model transformer outputting raw hidden-states without any specific head on top. */ export class CamembertModel extends CamembertPreTrainedModel { } /** * CamemBERT Model with a `language modeling` head on top. */ export class CamembertForMaskedLM extends CamembertPreTrainedModel { /** * Calls the model on new inputs. * * @param {Object} model_inputs The inputs to the model. * @returns {Promise<MaskedLMOutput>} An object containing the model's output logits for masked language modeling. 
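 *
 * **Example:** Masked language modeling via the `fill-mask` pipeline (an illustrative sketch; the model ID
 * is a placeholder, and CamemBERT-style tokenizers use `<mask>` as the mask token).
 * ```javascript
 * import { pipeline } from '@huggingface/transformers';
 *
 * const unmasker = await pipeline('fill-mask', '<camembert-model-id>'); // placeholder model ID
 * const output = await unmasker('Paris est la <mask> de la France.');
 * // e.g. [{ token_str: 'capitale', score: ..., sequence: '...' }, ...]
 * ```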
*/ async _call(model_inputs) { return new MaskedLMOutput(await super._call(model_inputs)); } } /** * CamemBERT Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. */ export class CamembertForSequenceClassification extends CamembertPreTrainedModel { /** * Calls the model on new inputs. * * @param {Object} model_inputs The inputs to the model. * @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification. */ async _call(model_inputs) { return new SequenceClassifierOutput(await super._call(model_inputs)); } } /** * CamemBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. */ export class CamembertForTokenClassification extends CamembertPreTrainedModel { /** * Calls the model on new inputs. * * @param {Object} model_inputs The inputs to the model. * @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification. */ async _call(model_inputs) { return new TokenClassifierOutput(await super._call(model_inputs)); } } /** * CamemBERT Model with a span classification head on top for extractive question-answering tasks */ export class CamembertForQuestionAnswering extends CamembertPreTrainedModel { /** * Calls the model on new inputs. * * @param {Object} model_inputs The inputs to the model. * @returns {Promise<QuestionAnsweringModelOutput>} An object containing the model's output logits for question answering. */ async _call(model_inputs) { return new QuestionAnsweringModelOutput(await super._call(model_inputs)); } } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // DeBERTa models export class DebertaPreTrainedModel extends PreTrainedModel { } /** * The bare DeBERTa Model transformer outputting raw hidden-states without any specific head on top. */ export class DebertaModel extends DebertaPreTrainedModel { } /** * DeBERTa Model with a `language modeling` head on top. */ export class DebertaForMaskedLM extends DebertaPreTrainedModel { /** * Calls the model on new inputs. * * @param {Object} model_inputs The inputs to the model. * @returns {Promise<MaskedLMOutput>} An object containing the model's output logits for masked language modeling. */ async _call(model_inputs) { return new MaskedLMOutput(await super._call(model_inputs)); } } /** * DeBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) */ export class DebertaForSequenceClassification extends DebertaPreTrainedModel { /** * Calls the model on new inputs. * * @param {Object} model_inputs The inputs to the model. * @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification. */ async _call(model_inputs) { return new SequenceClassifierOutput(await super._call(model_inputs)); } } /** * DeBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. */ export class DebertaForTokenClassification extends DebertaPreTrainedModel { /** * Calls the model on new inputs. * * @param {Object} model_inputs The inputs to the model. * @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification. 
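 *
 * **Example:** Named-entity recognition via the `token-classification` pipeline (an illustrative sketch;
 * the model ID is a placeholder).
 * ```javascript
 * import { pipeline } from '@huggingface/transformers';
 *
 * const ner = await pipeline('token-classification', '<deberta-ner-model-id>'); // placeholder model ID
 * const output = await ner('My name is Sarah and I live in London.');
 * // e.g. [{ entity: 'B-PER', word: 'Sarah', score: ... }, { entity: 'B-LOC', word: 'London', score: ... }]
 * ```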
*/ async _call(model_inputs) { return new TokenClassifierOutput(await super._call(model_inputs)); } } /** * DeBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (linear * layers on top of the hidden-states output to compute `span start logits` and `span end logits`). */ export class DebertaForQuestionAnswering extends DebertaPreTrainedModel { /** * Calls the model on new inputs. * * @param {Object} model_inputs The inputs to the model. * @returns {Promise<QuestionAnsweringModelOutput>} An object containing the model's output logits for question answering. */ async _call(model_inputs) { return new QuestionAnsweringModelOutput(await super._call(model_inputs)); } } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // DeBERTa-v2 models export class DebertaV2PreTrainedModel extends PreTrainedModel { } /** * The bare DeBERTa-V2 Model transformer outputting raw hidden-states without any specific head on top. */ export class DebertaV2Model extends DebertaV2PreTrainedModel { } /** * DeBERTa-V2 Model with a `language modeling` head on top. */ export class DebertaV2ForMaskedLM extends DebertaV2PreTrainedModel { /** * Calls the model on new inputs. * * @param {Object} model_inputs The inputs to the model. * @returns {Promise<MaskedLMOutput>} An object containing the model's output logits for masked language modeling. */ async _call(model_inputs) { return new MaskedLMOutput(await super._call(model_inputs)); } } /** * DeBERTa-V2 Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) */ export class DebertaV2ForSequenceClassification extends DebertaV2PreTrainedModel { /** * Calls the model on new inputs. * * @param {Object} model_inputs The inputs to the model. * @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification. */ async _call(model_inputs) { return new SequenceClassifierOutput(await super._call(model_inputs)); } } /** * DeBERTa-V2 Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. */ export class DebertaV2ForTokenClassification extends DebertaV2PreTrainedModel { /** * Calls the model on new inputs. * * @param {Object} model_inputs The inputs to the model. * @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification. */ async _call(model_inputs) { return new TokenClassifierOutput(await super._call(model_inputs)); } } /** * DeBERTa-V2 Model with a span classification head on top for extractive question-answering tasks like SQuAD (linear * layers on top of the hidden-states output to compute `span start logits` and `span end logits`). */ export class DebertaV2ForQuestionAnswering extends DebertaV2PreTrainedModel { /** * Calls the model on new inputs. * * @param {Object} model_inputs The inputs to the model. * @returns {Promise<QuestionAnsweringModelOutput>} An object containing the model's output logits for question answering. 
*/ async _call(model_inputs) { return new QuestionAnsweringModelOutput(await super._call(model_inputs)); } } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // DistilBert models export class DistilBertPreTrainedModel extends PreTrainedModel { } export class DistilBertModel extends DistilBertPreTrainedModel { } /** * DistilBertForSequenceClassification is a class representing a DistilBERT model for sequence classification. */ export class DistilBertForSequenceClassification extends DistilBertPreTrainedModel { /** * Calls the model on new inputs. * * @param {Object} model_inputs The inputs to the model. * @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification. */ async _call(model_inputs) { return new SequenceClassifierOutput(await super._call(model_inputs)); } } /** * DistilBertForTokenClassification is a class representing a DistilBERT model for token classification. */ export class DistilBertForTokenClassification extends DistilBertPreTrainedModel { /** * Calls the model on new inputs. * * @param {Object} model_inputs The inputs to the model. * @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification. */ async _call(model_inputs) { return new TokenClassifierOutput(await super._call(model_inputs)); } } /** * DistilBertForQuestionAnswering is a class representing a DistilBERT model for question answering. */ export class DistilBertForQuestionAnswering extends DistilBertPreTrainedModel { /** * Calls the model on new inputs. * * @param {Object} model_inputs The inputs to the model. * @returns {Promise<QuestionAnsweringModelOutput>} An object containing the model's output logits for question answering. */ async _call(model_inputs) { return new QuestionAnsweringModelOutput(await super._call(model_inputs)); } } /** * DistilBertForMaskedLM is a class representing a DistilBERT model for masking task. */ export class DistilBertForMaskedLM extends DistilBertPreTrainedModel { /** * Calls the model on new inputs. * * @param {Object} model_inputs The inputs to the model. * @returns {Promise<MaskedLMOutput>} returned object */ async _call(model_inputs) { return new MaskedLMOutput(await super._call(model_inputs)); } } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // ESM models export class EsmPreTrainedModel extends PreTrainedModel { } /** * The bare ESM Model transformer outputting raw hidden-states without any specific head on top. */ export class EsmModel extends EsmPreTrainedModel { } /** * ESM Model with a `language modeling` head on top. */ export class EsmForMaskedLM extends EsmPreTrainedModel { /** * Calls the model on new inputs. * * @param {Object} model_inputs The inputs to the model. * @returns {Promise<MaskedLMOutput>} An object containing the model's output logits for masked language modeling. */ async _call(model_inputs) { return new MaskedLMOutput(await super._call(model_inputs)); } } /** * ESM Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) */ export class EsmForSequenceClassification extends EsmPreTrainedModel { /** * Calls the model on new inputs. * * @param {Object} model_inputs The inputs to the model. * @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification. 
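 *
 * **Example:** Scoring a protein sequence with the low-level model API (an illustrative sketch; the model ID
 * is a placeholder for a fine-tuned ESM sequence classifier).
 * ```javascript
 * import { AutoTokenizer, AutoModelForSequenceClassification } from '@huggingface/transformers';
 *
 * const model_id = '<esm-sequence-classifier-id>'; // placeholder model ID
 * const tokenizer = await AutoTokenizer.from_pretrained(model_id);
 * const model = await AutoModelForSequenceClassification.from_pretrained(model_id);
 *
 * const inputs = await tokenizer('MSKGEELFTGVVPILVELDGDVNGHKF'); // amino-acid sequence
 * const { logits } = await model(inputs); // [batch_size, num_labels]
 * ```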
*/ async _call(model_inputs) { return new SequenceClassifierOutput(await super._call(model_inputs)); } } /** * ESM Model with a token classification head on top (a linear layer on top of the hidden-states output) * e.g. for Named-Entity-Recognition (NER) tasks. */ export class EsmForTokenClassification extends EsmPreTrainedModel { /** * Calls the model on new inputs. * * @param {Object} model_inputs The inputs to the model. * @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification. */ async _call(model_inputs) { return new TokenClassifierOutput(await super._call(model_inputs)); } } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // MobileBert models export class MobileBertPreTrainedModel extends PreTrainedModel { } export class MobileBertModel extends MobileBertPreTrainedModel { } /** * MobileBertForMaskedLM is a class representing a MobileBERT model for masking task. */ export class MobileBertForMaskedLM extends MobileBertPreTrainedModel { /** * Calls the model on new inputs. * * @param {Object} model_inputs The inputs to the model. * @returns {Promise<MaskedLMOutput>} returned object */ async _call(model_inputs) { return new MaskedLMOutput(await super._call(model_inputs)); } } /** * MobileBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) */ export class MobileBertForSequenceClassification extends MobileBertPreTrainedModel { /** * Calls the model on new inputs. * * @param {Object} model_inputs The inputs to the model. * @returns {Promise<SequenceClassifierOutput>} returned object */ async _call(model_inputs) { return new SequenceClassifierOutput(await super._call(model_inputs)); } } /** * MobileBert Model with a span classification head on top for extractive question-answering tasks */ export class MobileBertForQuestionAnswering extends MobileBertPreTrainedModel { /** * Calls the model on new inputs. * * @param {Object} model_inputs The inputs to the model. * @returns {Promise<QuestionAnsweringModelOutput>} returned object */ async _call(model_inputs) { return new QuestionAnsweringModelOutput(await super._call(model_inputs)); } } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // MPNet models export class MPNetPreTrainedModel extends PreTrainedModel { } /** * The bare MPNet Model transformer outputting raw hidden-states without any specific head on top. */ export class MPNetModel extends MPNetPreTrainedModel { } /** * MPNetForMaskedLM is a class representing a MPNet model for masked language modeling. */ export class MPNetForMaskedLM extends MPNetPreTrainedModel { /** * Calls the model on new inputs. * * @param {Object} model_inputs The inputs to the model. * @returns {Promise<MaskedLMOutput>} An object containing the model's output logits for masked language modeling. */ async _call(model_inputs) { return new MaskedLMOutput(await super._call(model_inputs)); } } /** * MPNetForSequenceClassification is a class representing a MPNet model for sequence classification. */ export class MPNetForSequenceClassification extends MPNetPreTrainedModel { /** * Calls the model on new inputs. * * @param {Object} model_inputs The inputs to the model. * @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification. 
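 *
 * **Example:** Text classification via the `text-classification` pipeline, which applies a softmax to these
 * logits and maps them to labels (an illustrative sketch; the model ID is a placeholder).
 * ```javascript
 * import { pipeline } from '@huggingface/transformers';
 *
 * const classifier = await pipeline('text-classification', '<mpnet-classifier-id>'); // placeholder model ID
 * const output = await classifier('I love transformers!');
 * // e.g. [{ label: 'POSITIVE', score: ... }]
 * ```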
*/ async _call(model_inputs) { return new SequenceClassifierOutput(await super._call(model_inputs)); } } /** * MPNetForTokenClassification is a class representing a MPNet model for token classification. */ export class MPNetForTokenClassification extends MPNetPreTrainedModel { /** * Calls the model on new inputs. * * @param {Object} model_inputs The inputs to the model. * @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification. */ async _call(model_inputs) { return new TokenClassifierOutput(await super._call(model_inputs)); } } /** * MPNetForQuestionAnswering is a class representing a MPNet model for question answering. */ export class MPNetForQuestionAnswering extends MPNetPreTrainedModel { /** * Calls the model on new inputs. * * @param {Object} model_inputs The inputs to the model. * @returns {Promise<QuestionAnsweringModelOutput>} An object containing the model's output logits for question answering. */ async _call(model_inputs) { return new QuestionAnsweringModelOutput(await super._call(model_inputs)); } } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // SqueezeBert models export class SqueezeBertPreTrainedModel extends PreTrainedModel { } export class SqueezeBertModel extends SqueezeBertPreTrainedModel { } export class SqueezeBertForMaskedLM extends SqueezeBertPreTrainedModel { /** * Calls the model on new inputs. * * @param {Object} model_inputs The inputs to the model. * @returns {Promise<MaskedLMOutput>} returned object */ async _call(model_inputs) { return new MaskedLMOutput(await super._call(model_inputs)); } } export class SqueezeBertForSequenceClassification extends SqueezeBertPreTrainedModel { /** * Calls the model on new inputs. * * @param {Object} model_inputs The inputs to the model. * @returns {Promise<SequenceClassifierOutput>} returned object */ async _call(model_inputs) { return new SequenceClassifierOutput(await super._call(model_inputs)); } } export class SqueezeBertForQuestionAnswering extends SqueezeBertPreTrainedModel { /** * Calls the model on new inputs. * * @param {Object} model_inputs The inputs to the model. * @returns {Promise<QuestionAnsweringModelOutput>} returned object */ async _call(model_inputs) { return new QuestionAnsweringModelOutput(await super._call(model_inputs)); } } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // Albert models export class AlbertPreTrainedModel extends PreTrainedModel { } export class AlbertModel extends AlbertPreTrainedModel { } export class AlbertForSequenceClassification extends AlbertPreTrainedModel { /** * Calls the model on new inputs. * * @param {Object} model_inputs The inputs to the model. * @returns {Promise<SequenceClassifierOutput>} returned object */ async _call(model_inputs) { return new SequenceClassifierOutput(await super._call(model_inputs)); } } export class AlbertForQuestionAnswering extends AlbertPreTrainedModel { /** * Calls the model on new inputs. * * @param {Object} model_inputs The inputs to the model. * @returns {Promise<QuestionAnsweringModelOutput>} returned object */ async _call(model_inputs) { return new QuestionAnsweringModelOutput(await super._call(model_inputs)); } } export class AlbertForMaskedLM extends AlbertPreTrainedModel { /** * Calls the model on new inputs. * * @param {Object} model_inputs The inputs to the model. 
* @returns {Promise<MaskedLMOutput>} returned object */ async _call(model_inputs) { return new MaskedLMOutput(await super._call(model_inputs)); } } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // T5 models export class T5PreTrainedModel extends PreTrainedModel { forward_params = [ 'input_ids', 'attention_mask', 'encoder_outputs', 'decoder_input_ids', 'decoder_attention_mask', 'past_key_values', ]; }; export class T5Model extends T5PreTrainedModel { } /** * T5ForConditionalGeneration is a class representing a T5 model for conditional generation. */ export class T5ForConditionalGeneration extends T5PreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // LONGT5 models /** * An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. */ export class LongT5PreTrainedModel extends PreTrainedModel { }; /** * The bare LONGT5 Model transformer outputting raw hidden-states without any specific head on top. */ export class LongT5Model extends LongT5PreTrainedModel { } /** * LONGT5 Model with a `language modeling` head on top. */ export class LongT5ForConditionalGeneration extends LongT5PreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // MT5 models export class MT5PreTrainedModel extends PreTrainedModel { }; export class MT5Model extends MT5PreTrainedModel { } /** * A class representing a conditional sequence-to-sequence model based on the MT5 architecture. */ export class MT5ForConditionalGeneration extends MT5PreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // Bart models export class BartPretrainedModel extends PreTrainedModel { }; /** * The bare BART Model outputting raw hidden-states without any specific head on top. */ export class BartModel extends BartPretrainedModel { } /** * The BART Model with a language modeling head. Can be used for summarization. */ export class BartForConditionalGeneration extends BartPretrainedModel { } /** * Bart model with a sequence classification head on top (a linear layer on top of the pooled output) */ export class BartForSequenceClassification extends BartPretrainedModel { /** * Calls the model on new inputs. * * @param {Object} model_inputs The inputs to the model. * @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification. */ async _call(model_inputs) { return new SequenceClassifierOutput(await super._call(model_inputs)); } } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // MBart models export class MBartPreTrainedModel extends PreTrainedModel { }; /** * The bare MBART Model outputting raw hidden-states without any specific head on top. */ export class MBartModel extends MBartPreTrainedModel { } /** * The MBART Model with a language modeling head. Can be used for summarization, after fine-tuning the pretrained models. */ export class MBartForConditionalGeneration extends MBartPreTrainedModel { } /** * MBart model with a sequence classification head on top (a linear layer on top of the pooled output). */ export class MBartForSequenceClassification extends MBartPreTrainedModel { /** * Calls the model on new inputs. * * @param {Object} model_inputs The inputs to the model. 
* @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification. */ async _call(model_inputs) { return new SequenceClassifierOutput(await super._call(model_inputs)); } } export class MBartForCausalLM extends MBartPreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // Blenderbot models export class BlenderbotPreTrainedModel extends PreTrainedModel { }; /** * The bare Blenderbot Model outputting raw hidden-states without any specific head on top. */ export class BlenderbotModel extends BlenderbotPreTrainedModel { } /** * The Blenderbot Model with a language modeling head. Can be used for summarization. */ export class BlenderbotForConditionalGeneration extends BlenderbotPreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // BlenderbotSmall models export class BlenderbotSmallPreTrainedModel extends PreTrainedModel { }; /** * The bare BlenderbotSmall Model outputting raw hidden-states without any specific head on top. */ export class BlenderbotSmallModel extends BlenderbotSmallPreTrainedModel { } /** * The BlenderbotSmall Model with a language modeling head. Can be used for summarization. */ export class BlenderbotSmallForConditionalGeneration extends BlenderbotSmallPreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // Roberta models export class RobertaPreTrainedModel extends PreTrainedModel { } export class RobertaModel extends RobertaPreTrainedModel { } /** * RobertaForMaskedLM class for performing masked language modeling on Roberta models. */ export class RobertaForMaskedLM extends RobertaPreTrainedModel { /** * Calls the model on new inputs. * * @param {Object} model_inputs The inputs to the model. * @returns {Promise<MaskedLMOutput>} returned object */ async _call(model_inputs) { return new MaskedLMOutput(await super._call(model_inputs)); } } /** * RobertaForSequenceClassification class for performing sequence classification on Roberta models. */ export class RobertaForSequenceClassification extends RobertaPreTrainedModel { /** * Calls the model on new inputs. * * @param {Object} model_inputs The inputs to the model. * @returns {Promise<SequenceClassifierOutput>} returned object */ async _call(model_inputs) { return new SequenceClassifierOutput(await super._call(model_inputs)); } } /** * RobertaForTokenClassification class for performing token classification on Roberta models. */ export class RobertaForTokenClassification extends RobertaPreTrainedModel { /** * Calls the model on new inputs. * * @param {Object} model_inputs The inputs to the model. * @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification. */ async _call(model_inputs) { return new TokenClassifierOutput(await super._call(model_inputs)); } } /** * RobertaForQuestionAnswering class for performing question answering on Roberta models. */ export class RobertaForQuestionAnswering extends RobertaPreTrainedModel { /** * Calls the model on new inputs. * * @param {Object} model_inputs The inputs to the model. 
* @returns {Promise<QuestionAnsweringModelOutput>} returned object */ async _call(model_inputs) { return new QuestionAnsweringModelOutput(await super._call(model_inputs)); } } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // XLM models /** * An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. */ export class XLMPreTrainedModel extends PreTrainedModel { } /** * The bare XLM Model transformer outputting raw hidden-states without any specific head on top. */ export class XLMModel extends XLMPreTrainedModel { } /** * The XLM Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings). */ export class XLMWithLMHeadModel extends XLMPreTrainedModel { /** * Calls the model on new inputs. * * @param {Object} model_inputs The inputs to the model. * @returns {Promise<MaskedLMOutput>} returned object */ async _call(model_inputs) { return new MaskedLMOutput(await super._call(model_inputs)); } } /** * XLM Model with a sequence classification/regression head on top (a linear layer on top of the pooled output) */ export class XLMForSequenceClassification extends XLMPreTrainedModel { /** * Calls the model on new inputs. * * @param {Object} model_inputs The inputs to the model. * @returns {Promise<SequenceClassifierOutput>} returned object */ async _call(model_inputs) { return new SequenceClassifierOutput(await super._call(model_inputs)); } } /** * XLM Model with a token classification head on top (a linear layer on top of the hidden-states output) */ export class XLMForTokenClassification extends XLMPreTrainedModel { /** * Calls the model on new inputs. * * @param {Object} model_inputs The inputs to the model. * @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification. */ async _call(model_inputs) { return new TokenClassifierOutput(await super._call(model_inputs)); } } /** * XLM Model with a span classification head on top for extractive question-answering tasks */ export class XLMForQuestionAnswering extends XLMPreTrainedModel { /** * Calls the model on new inputs. * * @param {Object} model_inputs The inputs to the model. * @returns {Promise<QuestionAnsweringModelOutput>} returned object */ async _call(model_inputs) { return new QuestionAnsweringModelOutput(await super._call(model_inputs)); } } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // XLMRoberta models export class XLMRobertaPreTrainedModel extends PreTrainedModel { } export class XLMRobertaModel extends XLMRobertaPreTrainedModel { } /** * XLMRobertaForMaskedLM class for performing masked language modeling on XLMRoberta models. */ export class XLMRobertaForMaskedLM extends XLMRobertaPreTrainedModel { /** * Calls the model on new inputs. * * @param {Object} model_inputs The inputs to the model. * @returns {Promise<MaskedLMOutput>} returned object */ async _call(model_inputs) { return new MaskedLMOutput(await super._call(model_inputs)); } } /** * XLMRobertaForSequenceClassification class for performing sequence classification on XLMRoberta models. */ export class XLMRobertaForSequenceClassification extends XLMRobertaPreTrainedModel { /** * Calls the model on new inputs. * * @param {Object} model_inputs The inputs to the model. 
* @returns {Promise<SequenceClassifierOutput>} returned object */ async _call(model_inputs) { return new SequenceClassifierOutput(await super._call(model_inputs)); } } /** * XLMRobertaForTokenClassification class for performing token classification on XLMRoberta models. */ export class XLMRobertaForTokenClassification extends XLMRobertaPreTrainedModel { /** * Calls the model on new inputs. * * @param {Object} model_inputs The inputs to the model. * @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification. */ async _call(model_inputs) { return new TokenClassifierOutput(await super._call(model_inputs)); } } /** * XLMRobertaForQuestionAnswering class for performing question answering on XLMRoberta models. */ export class XLMRobertaForQuestionAnswering extends XLMRobertaPreTrainedModel { /** * Calls the model on new inputs. * * @param {Object} model_inputs The inputs to the model. * @returns {Promise<QuestionAnsweringModelOutput>} returned object */ async _call(model_inputs) { return new QuestionAnsweringModelOutput(await super._call(model_inputs)); } } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // Audio Spectrogram Transformer (AST) models export class ASTPreTrainedModel extends PreTrainedModel { }; /** * The bare AST Model transformer outputting raw hidden-states without any specific head on top. */ export class ASTModel extends ASTPreTrainedModel { } /** * Audio Spectrogram Transformer model with an audio classification head on top * (a linear layer on top of the pooled output) e.g. for datasets like AudioSet, Speech Commands v2. */ export class ASTForAudioClassification extends ASTPreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // Whisper models export class WhisperPreTrainedModel extends PreTrainedModel { requires_attention_mask = false; main_input_name = 'input_features'; forward_params = [ 'input_features', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'past_key_values', ]; }; /** * WhisperModel class for training Whisper models without a language model head. */ export class WhisperModel extends WhisperPreTrainedModel { } /** * WhisperForConditionalGeneration class for generating conditional outputs from Whisper models. */ export class WhisperForConditionalGeneration extends WhisperPreTrainedModel { _prepare_generation_config(generation_config, kwargs) { return /** @type {WhisperGenerationConfig} */ (super._prepare_generation_config(generation_config, kwargs, WhisperGenerationConfig)); } /** * * @param {WhisperGenerationConfig} generation_config */ _retrieve_init_tokens(generation_config) { // prefix tokens are of the form: // - Multilingual: <|startoftranscript|> <|lang_id|> <|task|> [<|notimestamps|>] // - English-only: <|startoftranscript|> [<|notimestamps|>] // 1. Handle <|startoftranscript|> token const init_tokens = [generation_config.decoder_start_token_id]; // 2. 
Handle <|lang_id|> and <|task|> tokens let language = generation_config.language; const task = generation_config.task; if (generation_config.is_multilingual) { if (!language) { // TODO: Implement language detection console.warn('No language specified - defaulting to English (en).'); language = 'en'; } // Add language token const language_code = whisper_language_to_code(language); const language_token = `<|${language_code}|>`; init_tokens.push(generation_config.lang_to_id[language_token]) // Add task token // NOTE: Defaults to 'transcribe' if no task is specified init_tokens.push(generation_config.task_to_id[task ?? 'transcribe']); } else if (language || task) { throw new Error( "Cannot specify `task` or `language` for an English-only model. If the model is intended to be multilingual, pass `is_multilingual=true` to generate, or update the generation config." ) } // 3. Handle <|notimestamps|> token if ( !generation_config.return_timestamps && generation_config.no_timestamps_token_id && init_tokens.at(-1) !== generation_config.no_timestamps_token_id ) { init_tokens.push(generation_config.no_timestamps_token_id); } else if ( generation_config.return_timestamps && init_tokens.at(-1) === generation_config.no_timestamps_token_id ) { console.warn("<|notimestamps|> prompt token is removed from generation_config since `return_timestamps` is set to `true`."); init_tokens.pop(); } // let's make sure we don't pass `null` tokens as prompt tokens return init_tokens.filter(token => token != null); } /** * Transcribes or translates log-mel input features to a sequence of auto-regressively generated token ids. * @param {import('./models/whisper/generation_whisper.js').WhisperGenerationFunctionParameters} options * @returns {Promise<ModelOutput|Tensor>} The output of the model, which can contain the generated token ids, attentions, and scores. */ async generate({ inputs = null, generation_config = null, logits_processor = null, stopping_criteria = null, // Whisper-specific options (passed to kwargs) // prompt_ids = null, // language = null, // task = null, ...kwargs }) { generation_config = this._prepare_generation_config(generation_config, kwargs); const init_tokens = kwargs.decoder_input_ids ?? this._retrieve_init_tokens(generation_config); if (generation_config.return_timestamps) { logits_processor ??= new LogitsProcessorList(); logits_processor.push( new WhisperTimeStampLogitsProcessor(generation_config, init_tokens) ); } if (generation_config.begin_suppress_tokens) { logits_processor ??= new LogitsProcessorList(); logits_processor.push( new SuppressTokensAtBeginLogitsProcessor(generation_config.begin_suppress_tokens, init_tokens.length) ); } if (generation_config.return_token_timestamps) { if (!generation_config.alignment_heads) { throw new Error( "Model generation config has no `alignment_heads`, token-level timestamps not available. " + "See https://gist.github.com/hollance/42e32852f24243b748ae6bc1f985b13a on how to add this property to the generation config."
) } if (generation_config.task === 'translate') { console.warn("Token-level timestamps may not be reliable for task 'translate'.") } generation_config.output_attentions = true; generation_config.return_dict_in_generate = true; } const outputs = await super.generate({ inputs, generation_config, logits_processor, decoder_input_ids: init_tokens, ...kwargs }); if (generation_config.return_token_timestamps) { outputs["token_timestamps"] = this._extract_token_timestamps( // @ts-expect-error TS2345 outputs, generation_config.alignment_heads, generation_config.num_frames, ); } return outputs; } /** * Calculates token-level timestamps using the encoder-decoder cross-attentions and * dynamic time-warping (DTW) to map each output token to a position in the input audio. * If `num_frames` is specified, the encoder-decoder cross-attentions will be cropped before applying DTW. * @param {Object} generate_outputs Outputs generated by the model * @param {Tensor[][]} generate_outputs.cross_attentions The cross attentions output by the model * @param {Tensor} generate_outputs.sequences The sequences output by the model * @param {number[][]} alignment_heads Alignment heads of the model * @param {number} [num_frames=null] Number of frames in the input audio. * @param {number} [time_precision=0.02] Precision of the timestamps in seconds * @returns {Tensor} tensor containing the timestamps in seconds for each predicted token */ _extract_token_timestamps(generate_outputs, alignment_heads, num_frames = null, time_precision = 0.02) { if (!generate_outputs.cross_attentions) { throw new Error( "Model outputs must contain cross attentions to extract timestamps. " + "This is most likely because the model was not exported with `output_attentions=True`." ) } if (num_frames == null) { console.warn( "`num_frames` has not been set, meaning the entire audio will be analyzed. " + "This may lead to inaccurate token-level timestamps for short audios (< 30 seconds)." ); } // @ts-expect-error TS2339 let median_filter_width = this.config.median_filter_width; if (median_filter_width === undefined) { console.warn("Model config has no `median_filter_width`, using default value of 7.") median_filter_width = 7; } // TODO: Improve batch processing const batch = generate_outputs.cross_attentions; // Create a list with `decoder_layers` elements, each a tensor of shape // (batch size, attention_heads, output length, input length). // @ts-expect-error TS2339 const cross_attentions = Array.from({ length: this.config.decoder_layers }, // Concatenate the cross attentions for each layer across sequence length dimension. (_, i) => cat(batch.map(x => x[i]), 2) ); const weights = stack(alignment_heads.map(([l, h]) => { if (l >= cross_attentions.length) { throw new Error(`Layer index ${l} is out of bounds for cross attentions (length ${cross_attentions.length}).`) } return num_frames ? cross_attentions[l].slice(null, h, null, [0, num_frames]) : cross_attentions[l].slice(null, h); })).transpose(1, 0, 2, 3); const [std, calculatedMean] = std_mean(weights, -2, 0, true); // Normalize and smoothen the weights. 
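        // For each attention head, subtract the per-frame mean and divide by the per-frame
        // standard deviation (computed over the token dimension above), then smooth each
        // token's attention row with a median filter before averaging across heads.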
const smoothedWeights = weights.clone(); // [1, 8, seqLength, 1500] for (let a = 0; a < smoothedWeights.dims[0]; ++a) { const aTensor = smoothedWeights[a]; // [8, seqLength, 1500] for (let b = 0; b < aTensor.dims[0]; ++b) { const bTensor = aTensor[b]; // [seqLength, 1500] const stdTensorData = std[a][b][0].data; // [1500] const meanTensorData = calculatedMean[a][b][0].data; // [1500] for (let c = 0; c < bTensor.dims[0]; ++c) { let cTensorData = bTensor[c].data; // [1500] for (let d = 0; d < cTensorData.length; ++d) { cTensorData[d] = (cTensorData[d] - meanTensorData[d]) / stdTensorData[d] } // Apply median filter. cTensorData.set(medianFilter(cTensorData, median_filter_width)) } } } // Average the different cross-attention heads. const batchedMatrices = [mean(smoothedWeights, 1)]; const timestampsShape = generate_outputs.sequences.dims; const timestamps = new Tensor( 'float32', new Float32Array(timestampsShape[0] * timestampsShape[1]), timestampsShape ); // Perform dynamic time warping on each element of the batch. for (let batch_idx = 0; batch_idx < timestampsShape[0]; ++batch_idx) { // NOTE: Since we run only one batch at a time, we can squeeze to get the same dimensions // as the python implementation const matrix = batchedMatrices[batch_idx].neg().squeeze_(0); const [text_indices, time_indices] = dynamic_time_warping(matrix.tolist()); const diffs = Array.from({ length: text_indices.length - 1 }, (v, i) => text_indices[i + 1] - text_indices[i]); const jumps = mergeArrays([1], diffs).map(x => !!x); // convert to boolean const jump_times = []; for (let i = 0; i < jumps.length; ++i) { if (jumps[i]) { // NOTE: No point in rounding here, since we set to Float32Array later jump_times.push(time_indices[i] * time_precision); } } timestamps[batch_idx].data.set(jump_times, 1) } return timestamps; } } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // Moonshine models export class MoonshinePreTrainedModel extends PreTrainedModel { requires_attention_mask = false; main_input_name = 'input_values'; forward_params = [ 'input_values', 'decoder_input_ids', 'past_key_values', ]; }; /** * MoonshineModel class for training Moonshine models without a language model head. */ export class MoonshineModel extends MoonshinePreTrainedModel { } export class MoonshineForConditionalGeneration extends MoonshinePreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// /** * Vision Encoder-Decoder model based on OpenAI's GPT architecture for image captioning and other vision tasks */ export class VisionEncoderDecoderModel extends PreTrainedModel { main_input_name = 'pixel_values'; forward_params = [ // Encoder inputs 'pixel_values', // Decoder inputs 'decoder_input_ids', 'encoder_hidden_states', 'past_key_values', ]; } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // LLaVa Models export class LlavaPreTrainedModel extends PreTrainedModel { forward_params = [ 'input_ids', 'attention_mask', 'pixel_values', 'position_ids', 'past_key_values', ]; } /** * The LLAVA model which consists of a vision backbone and a language model. 
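 *
 * At inference time, image patches are first embedded by the vision backbone; the resulting
 * image features are then spliced into the text embedding sequence at the position of the
 * image placeholder token (see `_merge_input_ids_with_image_features` below).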
*/ export class LlavaForConditionalGeneration extends LlavaPreTrainedModel { _merge_input_ids_with_image_features({ inputs_embeds, image_features, input_ids, attention_mask, }) { // @ts-expect-error TS2339 const image_token_index = this.config.image_token_index; const idsList = input_ids.tolist(); // NOTE: we use .findIndex instead of .indexOf to perform weak comparison (==) between BigInt and Number const indexOfImage = idsList.map(x => x.findIndex(x => x == image_token_index)); const noImages = indexOfImage.every(x => x === -1); const allImages = indexOfImage.every(x => x !== -1); if (!noImages && !allImages) { // Check for padding reasons throw new Error('Every input should contain either 0 or 1 image token.'); } if (noImages) { return { inputs_embeds, attention_mask, } } const stacked = []; const stacked_attention_mask = []; for (let i = 0; i < indexOfImage.length; ++i) { const index = indexOfImage[i]; const e = inputs_embeds[i]; const im = image_features[i]; const am = attention_mask[i]; stacked.push( cat([ e.slice([0, index]), im, e.slice([index + 1, e.dims[0]]), ], 0) ); stacked_attention_mask.push( cat([ am.slice([0, index]), ones([im.dims[0]]), am.slice([index + 1, am.dims[0]]) ], 0) ) } return { inputs_embeds: stack(stacked, 0), attention_mask: stack(stacked_attention_mask, 0), } } } ////////////////////////////////////////////////// export class LlavaOnevisionForConditionalGeneration extends LlavaForConditionalGeneration { } // NOTE: extends LlavaForConditionalGeneration export class Moondream1ForConditionalGeneration extends LlavaForConditionalGeneration { } // NOTE: extends LlavaForConditionalGeneration export class Florence2PreTrainedModel extends PreTrainedModel { forward_params = [ // Encoder inputs 'input_ids', 'inputs_embeds', 'attention_mask', 'pixel_values', // Decoder inputs 'encoder_outputs', 'decoder_input_ids', 'decoder_inputs_embeds', 'decoder_attention_mask', 'past_key_values', ]; main_input_name = 'inputs_embeds'; } export class Florence2ForConditionalGeneration extends Florence2PreTrainedModel { _merge_input_ids_with_image_features({ inputs_embeds, image_features, input_ids, attention_mask, }) { return { inputs_embeds: cat([ image_features, // image embeds inputs_embeds, // task prefix embeds ], 1), attention_mask: cat([ ones(image_features.dims.slice(0, 2)), // image attention mask attention_mask, // task prefix attention mask ], 1), } } async _prepare_inputs_embeds({ input_ids, pixel_values, inputs_embeds, attention_mask }) { if (!input_ids && !pixel_values) { throw new Error('Either `input_ids` or `pixel_values` should be provided.'); } // 1. Possibly, extract the input embeddings let text_features, image_features; if (input_ids) { text_features = await this.encode_text({ input_ids }); } if (pixel_values) { image_features = await this.encode_image({ pixel_values }); } // 2. 
Possibly, merge text and images if (text_features && image_features) { ({ inputs_embeds, attention_mask } = this._merge_input_ids_with_image_features({ inputs_embeds: text_features, image_features, input_ids, attention_mask, })); } else { inputs_embeds = text_features || image_features; } return { inputs_embeds, attention_mask }; } async forward({ input_ids, pixel_values, attention_mask, decoder_input_ids, decoder_attention_mask, encoder_outputs, past_key_values, inputs_embeds, decoder_inputs_embeds, }) { if (!inputs_embeds) { ({ inputs_embeds, attention_mask } = await this._prepare_inputs_embeds({ input_ids, pixel_values, inputs_embeds, attention_mask })); } if (!encoder_outputs) { // Must compute encoder outputs let { last_hidden_state } = await encoderForward(this, { inputs_embeds, attention_mask }); encoder_outputs = last_hidden_state; } if (!decoder_inputs_embeds) { if (!decoder_input_ids) { throw new Error('Either `decoder_input_ids` or `decoder_inputs_embeds` should be provided.'); } decoder_inputs_embeds = await this.encode_text({ input_ids: decoder_input_ids }); } const decoderFeeds = { inputs_embeds: decoder_inputs_embeds, attention_mask: decoder_attention_mask, encoder_attention_mask: attention_mask, encoder_hidden_states: encoder_outputs, past_key_values, }; const decoder_outputs = await decoderForward(this, decoderFeeds, true); return decoder_outputs; } } export class PaliGemmaPreTrainedModel extends PreTrainedModel { forward_params = [ 'input_ids', // 'inputs_embeds', 'attention_mask', 'pixel_values', 'position_ids', 'past_key_values', ]; } export class PaliGemmaForConditionalGeneration extends PaliGemmaPreTrainedModel { _merge_input_ids_with_image_features(kwargs) { const vision_hidden_size = kwargs.image_features.dims.at(-1); const reshaped_image_hidden_states = kwargs.image_features.view(-1, vision_hidden_size); return default_merge_input_ids_with_image_features({ // @ts-ignore image_token_id: this.config.image_token_index, ...kwargs, image_features: reshaped_image_hidden_states, }) } } ////////////////////////////////////////////////// // Idefics3 Models export class Idefics3PreTrainedModel extends PreTrainedModel { forward_params = [ 'input_ids', 'attention_mask', 'pixel_values', 'pixel_attention_mask', 'position_ids', 'past_key_values', ]; } /** * The Idefics3 model which consists of a vision backbone and a language model. 
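 *
 * Image inputs are encoded by a separate `vision_encoder` session (see `encode_image` below), and the
 * resulting features are merged into the token embedding sequence at the positions of the image
 * placeholder tokens before being passed to the language model.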
*/ export class Idefics3ForConditionalGeneration extends Idefics3PreTrainedModel { async encode_image({ pixel_values, pixel_attention_mask }) { const features = (await sessionRun(this.sessions['vision_encoder'], { pixel_values, pixel_attention_mask })).image_features; return features; } _merge_input_ids_with_image_features(kwargs) { const vision_hidden_size = kwargs.image_features.dims.at(-1); const reshaped_image_hidden_states = kwargs.image_features.view(-1, vision_hidden_size); return default_merge_input_ids_with_image_features({ // @ts-ignore image_token_id: this.config.image_token_id, ...kwargs, image_features: reshaped_image_hidden_states, }) } } ////////////////////////////////////////////////// export class Phi3VPreTrainedModel extends PreTrainedModel { forward_params = [ 'input_ids', 'inputs_embeds', 'attention_mask', 'position_ids', 'pixel_values', 'image_sizes', 'past_key_values', ]; } export class Phi3VForCausalLM extends Phi3VPreTrainedModel { async forward({ // Produced by the tokenizer/processor: input_ids = null, attention_mask = null, pixel_values = null, image_sizes = null, // Used during generation: position_ids = null, inputs_embeds = null, past_key_values = null, // Generic generation parameters generation_config = null, logits_processor = null, // TODO: needed? ...kwargs }) { if (!inputs_embeds) { let image_features; if (pixel_values && input_ids.dims[1] !== 1) { if (!image_sizes) { throw new Error('`image_sizes` must be provided when `pixel_values` is provided.'); } // Encode the image ({ image_features } = await sessionRun(this.sessions['vision_encoder'], { pixel_values, image_sizes, })); } else { const hidden_size = this.config.normalized_config.hidden_size; image_features = new Tensor( 'float32', [], [0, hidden_size], ); } ({ inputs_embeds } = await sessionRun(this.sessions['prepare_inputs_embeds'], { input_ids, image_features, })); } const outputs = await decoderForward(this, { inputs_embeds, past_key_values, attention_mask, position_ids, generation_config, logits_processor, }, false); return outputs; } } ////////////////////////////////////////////////// export class CLIPPreTrainedModel extends PreTrainedModel { } /** * CLIP Text and Vision Model with a projection layers on top * * **Example:** Perform zero-shot image classification with a `CLIPModel`. 
* * ```javascript * import { AutoTokenizer, AutoProcessor, CLIPModel, RawImage } from '@huggingface/transformers'; * * // Load tokenizer, processor, and model * let tokenizer = await AutoTokenizer.from_pretrained('Xenova/clip-vit-base-patch16'); * let processor = await AutoProcessor.from_pretrained('Xenova/clip-vit-base-patch16'); * let model = await CLIPModel.from_pretrained('Xenova/clip-vit-base-patch16'); * * // Run tokenization * let texts = ['a photo of a car', 'a photo of a football match'] * let text_inputs = tokenizer(texts, { padding: true, truncation: true }); * * // Read image and run processor * let image = await RawImage.read('https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/football-match.jpg'); * let image_inputs = await processor(image); * * // Run model with both text and pixel inputs * let output = await model({ ...text_inputs, ...image_inputs }); * // { * // logits_per_image: Tensor { * // dims: [ 1, 2 ], * // data: Float32Array(2) [ 18.579734802246094, 24.31830596923828 ], * // }, * // logits_per_text: Tensor { * // dims: [ 2, 1 ], * // data: Float32Array(2) [ 18.579734802246094, 24.31830596923828 ], * // }, * // text_embeds: Tensor { * // dims: [ 2, 512 ], * // data: Float32Array(1024) [ ... ], * // }, * // image_embeds: Tensor { * // dims: [ 1, 512 ], * // data: Float32Array(512) [ ... ], * // } * // } * ``` */ export class CLIPModel extends CLIPPreTrainedModel { } /** * The text model from CLIP without any head or projection on top. */ export class CLIPTextModel extends CLIPPreTrainedModel { /** @type {typeof PreTrainedModel.from_pretrained} */ static async from_pretrained(pretrained_model_name_or_path, options = {}) { return super.from_pretrained(pretrained_model_name_or_path, { ...options, // Update default model file name if not provided model_file_name: options.model_file_name ?? 'text_model', }); } } /** * CLIP Text Model with a projection layer on top (a linear layer on top of the pooled output) * * **Example:** Compute text embeddings with `CLIPTextModelWithProjection`. * * ```javascript * import { AutoTokenizer, CLIPTextModelWithProjection } from '@huggingface/transformers'; * * // Load tokenizer and text model * const tokenizer = await AutoTokenizer.from_pretrained('Xenova/clip-vit-base-patch16'); * const text_model = await CLIPTextModelWithProjection.from_pretrained('Xenova/clip-vit-base-patch16'); * * // Run tokenization * let texts = ['a photo of a car', 'a photo of a football match']; * let text_inputs = tokenizer(texts, { padding: true, truncation: true }); * * // Compute embeddings * const { text_embeds } = await text_model(text_inputs); * // Tensor { * // dims: [ 2, 512 ], * // type: 'float32', * // data: Float32Array(1024) [ ... ], * // size: 1024 * // } * ``` */ export class CLIPTextModelWithProjection extends CLIPPreTrainedModel { /** @type {typeof PreTrainedModel.from_pretrained} */ static async from_pretrained(pretrained_model_name_or_path, options = {}) { return super.from_pretrained(pretrained_model_name_or_path, { ...options, // Update default model file name if not provided model_file_name: options.model_file_name ?? 'text_model', }); } } /** * The vision model from CLIP without any head or projection on top. 
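 *
 * **Example:** Compute patch-level vision features with `CLIPVisionModel` (an illustrative sketch;
 * the model id follows the other examples on this page, and the exact output names depend on how
 * the underlying `vision_model` ONNX graph was exported).
 *
 * ```javascript
 * import { AutoProcessor, CLIPVisionModel, RawImage } from '@huggingface/transformers';
 *
 * // Load processor and vision model
 * const processor = await AutoProcessor.from_pretrained('Xenova/clip-vit-base-patch16');
 * const vision_model = await CLIPVisionModel.from_pretrained('Xenova/clip-vit-base-patch16');
 *
 * // Read image and run processor
 * const image = await RawImage.read('https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/football-match.jpg');
 * const image_inputs = await processor(image);
 *
 * // Run the vision model; inspect the returned object for the available outputs
 * // (e.g. `last_hidden_state` and/or `pooler_output`, depending on the export).
 * const outputs = await vision_model(image_inputs);
 * console.log(Object.keys(outputs));
 * ```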
*/ export class CLIPVisionModel extends CLIPPreTrainedModel { /** @type {typeof PreTrainedModel.from_pretrained} */ static async from_pretrained(pretrained_model_name_or_path, options = {}) { return super.from_pretrained(pretrained_model_name_or_path, { ...options, // Update default model file name if not provided model_file_name: options.model_file_name ?? 'vision_model', }); } } /** * CLIP Vision Model with a projection layer on top (a linear layer on top of the pooled output) * * **Example:** Compute vision embeddings with `CLIPVisionModelWithProjection`. * * ```javascript * import { AutoProcessor, CLIPVisionModelWithProjection, RawImage} from '@huggingface/transformers'; * * // Load processor and vision model * const processor = await AutoProcessor.from_pretrained('Xenova/clip-vit-base-patch16'); * const vision_model = await CLIPVisionModelWithProjection.from_pretrained('Xenova/clip-vit-base-patch16'); * * // Read image and run processor * let image = await RawImage.read('https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/football-match.jpg'); * let image_inputs = await processor(image); * * // Compute embeddings * const { image_embeds } = await vision_model(image_inputs); * // Tensor { * // dims: [ 1, 512 ], * // type: 'float32', * // data: Float32Array(512) [ ... ], * // size: 512 * // } * ``` */ export class CLIPVisionModelWithProjection extends CLIPPreTrainedModel { /** @type {typeof PreTrainedModel.from_pretrained} */ static async from_pretrained(pretrained_model_name_or_path, options = {}) { return super.from_pretrained(pretrained_model_name_or_path, { ...options, // Update default model file name if not provided model_file_name: options.model_file_name ?? 'vision_model', }); } } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // SigLIP models export class SiglipPreTrainedModel extends PreTrainedModel { } /** * SigLIP Text and Vision Model with a projection layers on top * * **Example:** Perform zero-shot image classification with a `SiglipModel`. * * ```javascript * import { AutoTokenizer, AutoProcessor, SiglipModel, RawImage } from '@huggingface/transformers'; * * // Load tokenizer, processor, and model * const tokenizer = await AutoTokenizer.from_pretrained('Xenova/siglip-base-patch16-224'); * const processor = await AutoProcessor.from_pretrained('Xenova/siglip-base-patch16-224'); * const model = await SiglipModel.from_pretrained('Xenova/siglip-base-patch16-224'); * * // Run tokenization * const texts = ['a photo of 2 cats', 'a photo of 2 dogs']; * const text_inputs = tokenizer(texts, { padding: 'max_length', truncation: true }); * * // Read image and run processor * const image = await RawImage.read('http://images.cocodataset.org/val2017/000000039769.jpg'); * const image_inputs = await processor(image); * * // Run model with both text and pixel inputs * const output = await model({ ...text_inputs, ...image_inputs }); * // { * // logits_per_image: Tensor { * // dims: [ 1, 2 ], * // data: Float32Array(2) [ -1.6019744873046875, -10.720091819763184 ], * // }, * // logits_per_text: Tensor { * // dims: [ 2, 1 ], * // data: Float32Array(2) [ -1.6019744873046875, -10.720091819763184 ], * // }, * // text_embeds: Tensor { * // dims: [ 2, 768 ], * // data: Float32Array(1536) [ ... ], * // }, * // image_embeds: Tensor { * // dims: [ 1, 768 ], * // data: Float32Array(768) [ ... 
], * // } * // } * ``` */ export class SiglipModel extends SiglipPreTrainedModel { } /** * The text model from SigLIP without any head or projection on top. * * **Example:** Compute text embeddings with `SiglipTextModel`. * * ```javascript * import { AutoTokenizer, SiglipTextModel } from '@huggingface/transformers'; * * // Load tokenizer and text model * const tokenizer = await AutoTokenizer.from_pretrained('Xenova/siglip-base-patch16-224'); * const text_model = await SiglipTextModel.from_pretrained('Xenova/siglip-base-patch16-224'); * * // Run tokenization * const texts = ['a photo of 2 cats', 'a photo of 2 dogs']; * const text_inputs = tokenizer(texts, { padding: 'max_length', truncation: true }); * * // Compute embeddings * const { pooler_output } = await text_model(text_inputs); * // Tensor { * // dims: [ 2, 768 ], * // type: 'float32', * // data: Float32Array(1536) [ ... ], * // size: 1536 * // } * ``` */ export class SiglipTextModel extends SiglipPreTrainedModel { /** @type {typeof PreTrainedModel.from_pretrained} */ static async from_pretrained(pretrained_model_name_or_path, options = {}) { return super.from_pretrained(pretrained_model_name_or_path, { ...options, // Update default model file name if not provided model_file_name: options.model_file_name ?? 'text_model', }); } } /** * The vision model from SigLIP without any head or projection on top. * * **Example:** Compute vision embeddings with `SiglipVisionModel`. * * ```javascript * import { AutoProcessor, SiglipVisionModel, RawImage} from '@huggingface/transformers'; * * // Load processor and vision model * const processor = await AutoProcessor.from_pretrained('Xenova/siglip-base-patch16-224'); * const vision_model = await SiglipVisionModel.from_pretrained('Xenova/siglip-base-patch16-224'); * * // Read image and run processor * const image = await RawImage.read('https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/football-match.jpg'); * const image_inputs = await processor(image); * * // Compute embeddings * const { pooler_output } = await vision_model(image_inputs); * // Tensor { * // dims: [ 1, 768 ], * // type: 'float32', * // data: Float32Array(768) [ ... ], * // size: 768 * // } * ``` */ export class SiglipVisionModel extends CLIPPreTrainedModel { /** @type {typeof PreTrainedModel.from_pretrained} */ static async from_pretrained(pretrained_model_name_or_path, options = {}) { return super.from_pretrained(pretrained_model_name_or_path, { ...options, // Update default model file name if not provided model_file_name: options.model_file_name ?? 'vision_model', }); } } ////////////////////////////////////////////////// // ChineseCLIP models export class ChineseCLIPPreTrainedModel extends PreTrainedModel { } export class ChineseCLIPModel extends ChineseCLIPPreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // JinaCLIP models export class JinaCLIPPreTrainedModel extends PreTrainedModel { } export class JinaCLIPModel extends JinaCLIPPreTrainedModel { async forward(model_inputs) { const missing_text_inputs = !model_inputs.input_ids; const missing_image_inputs = !model_inputs.pixel_values; if (missing_text_inputs && missing_image_inputs) { throw new Error('Either `input_ids` or `pixel_values` should be provided.'); } // If either `input_ids` or `pixel_values` aren't passed, we need to create dummy input since the model requires a value to be specified. 
if (missing_text_inputs) { // NOTE: We cannot pass zero-dimension tensor as input for input_ids. // Fortunately, the majority of time is spent in the vision encoder, so this shouldn't significantly impact performance. model_inputs.input_ids = ones([model_inputs.pixel_values.dims[0], 1]); } if (missing_image_inputs) { // NOTE: Since we create a zero-sized tensor, this does not increase computation time. // @ts-ignore const { image_size } = this.config.vision_config; model_inputs.pixel_values = full([0, 3, image_size, image_size], 0.0); // (pass zero-dimension tensor) } const { text_embeddings, image_embeddings, l2norm_text_embeddings, l2norm_image_embeddings } = await super.forward(model_inputs); const result = {}; if (!missing_text_inputs) { result.text_embeddings = text_embeddings; result.l2norm_text_embeddings = l2norm_text_embeddings; } if (!missing_image_inputs) { result.image_embeddings = image_embeddings; result.l2norm_image_embeddings = l2norm_image_embeddings; } return result } } export class JinaCLIPTextModel extends JinaCLIPPreTrainedModel { /** @type {typeof PreTrainedModel.from_pretrained} */ static async from_pretrained(pretrained_model_name_or_path, options = {}) { return super.from_pretrained(pretrained_model_name_or_path, { ...options, // Update default model file name if not provided model_file_name: options.model_file_name ?? 'text_model', }); } } export class JinaCLIPVisionModel extends JinaCLIPPreTrainedModel { /** @type {typeof PreTrainedModel.from_pretrained} */ static async from_pretrained(pretrained_model_name_or_path, options = {}) { return super.from_pretrained(pretrained_model_name_or_path, { ...options, // Update default model file name if not provided model_file_name: options.model_file_name ?? 'vision_model', }); } } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // CLIPSeg models export class CLIPSegPreTrainedModel extends PreTrainedModel { } export class CLIPSegModel extends CLIPSegPreTrainedModel { } /** * CLIPSeg model with a Transformer-based decoder on top for zero-shot and one-shot image segmentation. * * **Example:** Perform zero-shot image segmentation with a `CLIPSegForImageSegmentation` model. * * ```javascript * import { AutoTokenizer, AutoProcessor, CLIPSegForImageSegmentation, RawImage } from '@huggingface/transformers'; * * // Load tokenizer, processor, and model * const tokenizer = await AutoTokenizer.from_pretrained('Xenova/clipseg-rd64-refined'); * const processor = await AutoProcessor.from_pretrained('Xenova/clipseg-rd64-refined'); * const model = await CLIPSegForImageSegmentation.from_pretrained('Xenova/clipseg-rd64-refined'); * * // Run tokenization * const texts = ['a glass', 'something to fill', 'wood', 'a jar']; * const text_inputs = tokenizer(texts, { padding: true, truncation: true }); * * // Read image and run processor * const image = await RawImage.read('https://github.com/timojl/clipseg/blob/master/example_image.jpg?raw=true'); * const image_inputs = await processor(image); * * // Run model with both text and pixel inputs * const { logits } = await model({ ...text_inputs, ...image_inputs }); * // logits: Tensor { * // dims: [4, 352, 352], * // type: 'float32', * // data: Float32Array(495616) [ ... 
], * // size: 495616 * // } * ``` * * You can visualize the predictions as follows: * ```javascript * const preds = logits * .unsqueeze_(1) * .sigmoid_() * .mul_(255) * .round_() * .to('uint8'); * * for (let i = 0; i < preds.dims[0]; ++i) { * const img = RawImage.fromTensor(preds[i]); * img.save(`prediction_${i}.png`); * } * ``` */ export class CLIPSegForImageSegmentation extends CLIPSegPreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // GPT2 models export class GPT2PreTrainedModel extends PreTrainedModel { } export class GPT2Model extends GPT2PreTrainedModel { } /** * GPT-2 language model head on top of the GPT-2 base model. This model is suitable for text generation tasks. */ export class GPT2LMHeadModel extends GPT2PreTrainedModel { } // export class GPT2ForSequenceClassification extends GPT2PreTrainedModel { // TODO // } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // JAIS models export class JAISPreTrainedModel extends PreTrainedModel { } /** * The bare JAIS Model transformer outputting raw hidden-states without any specific head on top. */ export class JAISModel extends JAISPreTrainedModel { } /** * The JAIS Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings). */ export class JAISLMHeadModel extends JAISPreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // GPTNeo models export class GPTNeoPreTrainedModel extends PreTrainedModel { } export class GPTNeoModel extends GPTNeoPreTrainedModel { } export class GPTNeoForCausalLM extends GPTNeoPreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // GPTNeoX models export class GPTNeoXPreTrainedModel extends PreTrainedModel { } export class GPTNeoXModel extends GPTNeoXPreTrainedModel { } export class GPTNeoXForCausalLM extends GPTNeoXPreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // GPT-J models export class GPTJPreTrainedModel extends PreTrainedModel { } export class GPTJModel extends GPTJPreTrainedModel { } export class GPTJForCausalLM extends GPTJPreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // GPTBigCode models export class GPTBigCodePreTrainedModel extends PreTrainedModel { } export class GPTBigCodeModel extends GPTBigCodePreTrainedModel { } export class GPTBigCodeForCausalLM extends GPTBigCodePreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // CodeGen models export class CodeGenPreTrainedModel extends PreTrainedModel { } /** * CodeGenModel is a class representing a code generation model without a language model head. */ export class CodeGenModel extends CodeGenPreTrainedModel { } /** * CodeGenForCausalLM is a class that represents a code generation model based on the GPT-2 architecture. It extends the `CodeGenPreTrainedModel` class. */ export class CodeGenForCausalLM extends CodeGenPreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // LLama models /** * The bare LLama Model outputting raw hidden-states without any specific head on top. 
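 *
 * **Example:** Text generation with a Llama-architecture checkpoint via `LlamaForCausalLM` (defined below).
 * This is an illustrative sketch; the model id `Xenova/TinyLlama-1.1B-Chat-v1.0` is an assumption and any
 * Llama-family ONNX checkpoint can be substituted.
 *
 * ```javascript
 * import { pipeline } from '@huggingface/transformers';
 *
 * // Create a text-generation pipeline (dispatches to LlamaForCausalLM for Llama-architecture checkpoints)
 * const generator = await pipeline('text-generation', 'Xenova/TinyLlama-1.1B-Chat-v1.0');
 *
 * // Generate a continuation
 * const output = await generator('The capital of France is', { max_new_tokens: 10 });
 * console.log(output);
 * // e.g. [{ generated_text: 'The capital of France is Paris, ...' }]
 * ```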
*/ export class LlamaPreTrainedModel extends PreTrainedModel { } /** * The bare LLaMA Model outputting raw hidden-states without any specific head on top. */ export class LlamaModel extends LlamaPreTrainedModel { } export class LlamaForCausalLM extends LlamaPreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // Helium models export class HeliumPreTrainedModel extends PreTrainedModel { } export class HeliumModel extends HeliumPreTrainedModel { } export class HeliumForCausalLM extends HeliumPreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // Glm models export class GlmPreTrainedModel extends PreTrainedModel { } export class GlmModel extends GlmPreTrainedModel { } export class GlmForCausalLM extends GlmPreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // EXAONE models export class ExaonePreTrainedModel extends PreTrainedModel { } export class ExaoneModel extends ExaonePreTrainedModel { } export class ExaoneForCausalLM extends ExaonePreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // MobileLLM models export class MobileLLMPreTrainedModel extends PreTrainedModel { } export class MobileLLMModel extends MobileLLMPreTrainedModel { } export class MobileLLMForCausalLM extends MobileLLMPreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // OLMo models export class OlmoPreTrainedModel extends PreTrainedModel { } export class OlmoModel extends OlmoPreTrainedModel { } export class OlmoForCausalLM extends OlmoPreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // OLMo2 models export class Olmo2PreTrainedModel extends PreTrainedModel { } export class Olmo2Model extends Olmo2PreTrainedModel { } export class Olmo2ForCausalLM extends Olmo2PreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // Granite models export class GranitePreTrainedModel extends PreTrainedModel { } export class GraniteModel extends GranitePreTrainedModel { } export class GraniteForCausalLM extends GranitePreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // Cohere models /** * The bare Cohere Model outputting raw hidden-states without any specific head on top. */ export class CoherePreTrainedModel extends PreTrainedModel { } export class CohereModel extends CoherePreTrainedModel { } export class CohereForCausalLM extends CoherePreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // Gemma models /** * The bare Gemma Model outputting raw hidden-states without any specific head on top. */ export class GemmaPreTrainedModel extends PreTrainedModel { } /** * The bare Gemma Model outputting raw hidden-states without any specific head on top. */ export class GemmaModel extends GemmaPreTrainedModel { } export class GemmaForCausalLM extends GemmaPreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // Gemma2 models /** * The bare Gemma2 Model outputting raw hidden-states without any specific head on top. 
*/ export class Gemma2PreTrainedModel extends PreTrainedModel { } /** * The bare Gemma2 Model outputting raw hidden-states without any specific head on top. */ export class Gemma2Model extends Gemma2PreTrainedModel { } export class Gemma2ForCausalLM extends Gemma2PreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// export class OpenELMPreTrainedModel extends PreTrainedModel { } export class OpenELMModel extends OpenELMPreTrainedModel { } export class OpenELMForCausalLM extends OpenELMPreTrainedModel { } ////////////////////////////////////////////////// // Qwen2 models /** * The bare Qwen2 Model outputting raw hidden-states without any specific head on top. */ export class Qwen2PreTrainedModel extends PreTrainedModel { } /** * The bare Qwen2 Model outputting raw hidden-states without any specific head on top. */ export class Qwen2Model extends Qwen2PreTrainedModel { } export class Qwen2ForCausalLM extends Qwen2PreTrainedModel { } ////////////////////////////////////////////////// export class Qwen2VLPreTrainedModel extends PreTrainedModel { forward_params = [ // Text inputs 'input_ids', 'attention_mask', 'position_ids', 'past_key_values', // Vision inputs 'pixel_values', 'image_grid_thw', ]; } export class Qwen2VLForConditionalGeneration extends Qwen2VLPreTrainedModel { /** * Calculate the 3D rope index based on image and video's temporal, height and width in LLM. * * Explanation: * Each embedding sequence contains vision embedding and text embedding or just contains text embedding. * * For pure text embedding sequence, the rotary position embedding has no difference with modern LLMs. * Examples: * input_ids: [T T T T T], here T is for text. * temporal position_ids: [0, 1, 2, 3, 4] * height position_ids: [0, 1, 2, 3, 4] * width position_ids: [0, 1, 2, 3, 4] * * For vision and text embedding sequence, we calculate 3D rotary position embedding for vision part * and 1D rotary position embedding for text part. * Examples: * Assume we have a video input with 3 temporal patches, 2 height patches and 2 width patches. * input_ids: [V V V V V V V V V V V V T T T T T], here V is for vision. * vision temporal position_ids: [0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2] * vision height position_ids: [0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1] * vision width position_ids: [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1] * text temporal position_ids: [3, 4, 5, 6, 7] * text height position_ids: [3, 4, 5, 6, 7] * text width position_ids: [3, 4, 5, 6, 7] * Here we calculate the text start position_ids as the max vision position_ids plus 1. * * @param {Tensor} input_ids Indices of input sequence tokens in the vocabulary. Tensor of shape `(batch_size, sequence_length)`. * @param {Tensor} image_grid_thw (Optional) The temporal, height and width of feature shape of each image in LLM. Tensor of shape `(num_images, 3)`. * @param {Tensor} video_grid_thw (Optional) The temporal, height and width of feature shape of each video in LLM. Tensor of shape `(num_videos, 3)`. * @param {Tensor} attention_mask (Optional) Mask to avoid performing attention on padding token indices. Tensor of shape `(batch_size, sequence_length)`. Mask values selected in `[0, 1]`: * - 1 for tokens that are **not masked**, * - 0 for tokens that are **masked**. * @returns {[Tensor, Tensor]} [position_ids, mrope_position_deltas] with: * - position_ids: Tensor of shape `(3, batch_size, sequence_length)`. * - mrope_position_deltas: Tensor of shape `(batch_size)`. 
*/ get_rope_index(input_ids, image_grid_thw, video_grid_thw, attention_mask) { // @ts-ignore const { vision_config, image_token_id, video_token_id, vision_start_token_id } = this.config; const spatial_merge_size = vision_config.spatial_merge_size ?? 2; const mrope_position_deltas = []; if (image_grid_thw || video_grid_thw) { let total_input_ids = input_ids.tolist(); if (!attention_mask) { attention_mask = ones_like(input_ids); } const attention_mask_list = attention_mask.tolist(); const position_ids_list = Array.from({ length: 3 }, _ => Array.from({ length: input_ids.dims[0] }, _ => Array.from({ length: input_ids.dims[1] }, _ => 1))); const image_grid_thw_list = image_grid_thw ? image_grid_thw.tolist() : []; const video_grid_thw_list = video_grid_thw ? video_grid_thw.tolist() : []; let image_index = 0; let video_index = 0; for (let i = 0; i < total_input_ids.length; ++i) { const ids = total_input_ids[i].filter((_, j) => attention_mask_list[i][j] == 1); const vision_start_indices = ids.reduce((acc, x, idx) => { if (x == vision_start_token_id) acc.push(idx); return acc; }, []); const vision_tokens = vision_start_indices.map(x => ids[x + 1]); const image_nums = vision_tokens.filter(x => x == image_token_id).length; const video_nums = vision_tokens.filter(x => x == video_token_id).length; /** @type {number[][]} */ let llm_pos_ids_list = []; let st = 0; let remain_images = image_nums; let remain_videos = video_nums; for (let j = 0; j < vision_tokens.length; ++j) { const next_image_token = ids.findIndex((x, i) => i > st && x == image_token_id); const next_video_token = ids.findIndex((x, i) => i > st && x == video_token_id); const ed_image = (remain_images > 0 && next_image_token !== -1) ? next_image_token : ids.length + 1; const ed_video = (remain_videos > 0 && next_video_token !== -1) ? next_video_token : ids.length + 1; let ed; let t, h, w; if (ed_image < ed_video) { ([t, h, w] = image_grid_thw_list[image_index]); ++image_index; --remain_images; ed = ed_image; } else { ([t, h, w] = video_grid_thw_list[video_index]); ++video_index; --remain_videos; ed = ed_video; } const [llm_grid_t, llm_grid_h, llm_grid_w] = [ Number(t), Math.floor(Number(h) / spatial_merge_size), Math.floor(Number(w) / spatial_merge_size) ] const text_len = ed - st; const st_idx = llm_pos_ids_list.length > 0 ? max(llm_pos_ids_list.at(-1))[0] + 1 : 0; llm_pos_ids_list.push( Array.from({ length: 3 * text_len }, (_, i) => st_idx + (i % text_len)) ) const offset = text_len + st_idx; const grid_size = llm_grid_t * llm_grid_h * llm_grid_w; const t_index = Array.from({ length: grid_size }, (_, i) => offset + Math.floor(i / (llm_grid_h * llm_grid_w))) const h_index = Array.from({ length: grid_size }, (_, i) => offset + Math.floor(i / llm_grid_w) % llm_grid_h) const w_index = Array.from({ length: grid_size }, (_, i) => offset + i % llm_grid_w) llm_pos_ids_list.push([t_index, h_index, w_index].flat()) st = ed + grid_size; } if (st < ids.length) { const st_idx = llm_pos_ids_list.length > 0 ? 
max(llm_pos_ids_list.at(-1))[0] + 1 : 0; const text_len = ids.length - st; llm_pos_ids_list.push( Array.from({ length: 3 * text_len }, (_, i) => (st_idx + (i % text_len))) ) } // NOTE: Each item in llm_pos_ids_list is an array of shape (3, text_len), // meaning to perform concatenation along dim=1, we can do the following: const num_items = llm_pos_ids_list.reduce((acc, x) => acc + x.length, 0); /** @type {number[]} */ const llm_positions = new Array(num_items); let index = 0; for (let x = 0; x < 3; ++x) { for (let y = 0; y < llm_pos_ids_list.length; ++y) { const val = llm_pos_ids_list[y]; const text_len = val.length / 3; for (let z = x * text_len; z < (x + 1) * text_len; ++z) { llm_positions[index++] = val[z]; } } } let count = 0; const attn_mask = attention_mask_list[i]; for (let y = 0; y < attn_mask.length; ++y) { if (attn_mask[y] == 1) { for (let x = 0; x < 3; ++x) { position_ids_list[x][i][y] = llm_positions[x * num_items / 3 + count]; } ++count; } } const max_llm_positions = max(llm_positions)[0]; mrope_position_deltas.push(max_llm_positions + 1 - total_input_ids[i].length); } return [ new Tensor('int64', position_ids_list.flat(Infinity), [3, input_ids.dims[0], input_ids.dims[1]]), new Tensor('int64', mrope_position_deltas, [mrope_position_deltas.length, 1]), ]; } else { // Text-only if (attention_mask) { const { data, dims } = cumsum_masked_fill(attention_mask); const position_ids = BigInt64Array.from( { length: 3 * data.length }, (_, i) => data[i % data.length] ); /** @type {bigint[]} */ const mrope_position_deltas = Array.from( { length: dims[0] }, (_, i) => max(data.subarray(dims[1] * i, dims[1] * (i + 1)))[0] + 1n + BigInt(dims[1]) ); return [ new Tensor('int64', position_ids, [3, ...dims]), new Tensor('int64', mrope_position_deltas, [mrope_position_deltas.length, 1]), ] } else { const [batch_size, seq_length] = input_ids.dims; const position_ids = BigInt64Array.from( { length: 3 * batch_size * seq_length }, (_, i) => BigInt(Math.floor(i % seq_length / batch_size)), ); return [ new Tensor('int64', position_ids, [3, ...input_ids.dims]), zeros([batch_size, 1]), ] } } } async encode_image({ pixel_values, image_grid_thw }) { const features = (await sessionRun(this.sessions['vision_encoder'], { pixel_values, grid_thw: image_grid_thw })).image_features; return features; } _merge_input_ids_with_image_features(kwargs) { return default_merge_input_ids_with_image_features({ // @ts-ignore image_token_id: this.config.image_token_id, ...kwargs }) } prepare_inputs_for_generation(input_ids, model_inputs, generation_config) { // Overwritten -- in specific circumstances we don't want to forward image inputs to the model if (model_inputs.attention_mask && !model_inputs.position_ids) { // Calculate position_ids and rope_deltas if (!model_inputs.past_key_values) { ([model_inputs.position_ids, model_inputs.rope_deltas] = this.get_rope_index( model_inputs.input_ids, model_inputs.image_grid_thw, model_inputs.video_grid_thw, model_inputs.attention_mask, )); } else { model_inputs.pixel_values = null; // model_inputs.pixel_values_videos = null; const delta = BigInt(Object.values(model_inputs.past_key_values)[0].dims.at(-2)); const rope_deltas_list = model_inputs.rope_deltas.map(x => delta + x); model_inputs.position_ids = stack([rope_deltas_list, rope_deltas_list, rope_deltas_list], 0) } } return model_inputs; } } ////////////////////////////////////////////////// // Phi models export class PhiPreTrainedModel extends PreTrainedModel { } /** * The bare Phi Model outputting raw hidden-states without any 
specific head on top. */ export class PhiModel extends PhiPreTrainedModel { } export class PhiForCausalLM extends PhiPreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // Phi3 models export class Phi3PreTrainedModel extends PreTrainedModel { } /** * The bare Phi3 Model outputting raw hidden-states without any specific head on top. */ export class Phi3Model extends Phi3PreTrainedModel { } export class Phi3ForCausalLM extends Phi3PreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // Bloom models /** * An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. */ export class BloomPreTrainedModel extends PreTrainedModel { } /** * The bare Bloom Model transformer outputting raw hidden-states without any specific head on top. */ export class BloomModel extends BloomPreTrainedModel { } /** * The Bloom Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings). */ export class BloomForCausalLM extends BloomPreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // MPT models export class MptPreTrainedModel extends PreTrainedModel { } /** * The bare Mpt Model transformer outputting raw hidden-states without any specific head on top. */ export class MptModel extends MptPreTrainedModel { } /** * The MPT Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings). */ export class MptForCausalLM extends MptPreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // OPT models export class OPTPreTrainedModel extends PreTrainedModel { } /** * The bare OPT Model outputting raw hidden-states without any specific head on top. */ export class OPTModel extends OPTPreTrainedModel { } /** * The OPT Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings). */ export class OPTForCausalLM extends OPTPreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// export class ViTPreTrainedModel extends PreTrainedModel { } export class ViTModel extends ViTPreTrainedModel { } export class ViTForImageClassification extends ViTPreTrainedModel { /** * @param {any} model_inputs */ async _call(model_inputs) { return new SequenceClassifierOutput(await super._call(model_inputs)); } } ////////////////////////////////////////////////// ////////////////////////////////////////////////// export class IJepaPreTrainedModel extends PreTrainedModel { } export class IJepaModel extends IJepaPreTrainedModel { } export class IJepaForImageClassification extends IJepaPreTrainedModel { /** * @param {any} model_inputs */ async _call(model_inputs) { return new SequenceClassifierOutput(await super._call(model_inputs)); } } ////////////////////////////////////////////////// ////////////////////////////////////////////////// export class VitPosePreTrainedModel extends PreTrainedModel { } /** * The VitPose model with a pose estimation head on top. 
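 *
 * **Example:** Pose estimation with `VitPoseForPoseEstimation` (an illustrative sketch; the model id
 * and the `heatmaps` output name are assumptions based on the PyTorch implementation, and post-processing
 * of the heatmaps into keypoint coordinates is omitted).
 *
 * ```javascript
 * import { AutoProcessor, VitPoseForPoseEstimation, RawImage } from '@huggingface/transformers';
 *
 * // Load processor and model
 * const processor = await AutoProcessor.from_pretrained('onnx-community/vitpose-base-simple');
 * const model = await VitPoseForPoseEstimation.from_pretrained('onnx-community/vitpose-base-simple');
 *
 * // Read image (ideally cropped to a single person) and run processor
 * const image = await RawImage.read('https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/football-match.jpg');
 * const inputs = await processor(image);
 *
 * // Predict keypoint heatmaps
 * const { heatmaps } = await model(inputs);
 * // Tensor of shape [ batch_size, num_keypoints, height, width ]
 * ```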
*/ export class VitPoseForPoseEstimation extends VitPosePreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// export class PvtPreTrainedModel extends PreTrainedModel { } export class PvtModel extends PvtPreTrainedModel { } export class PvtForImageClassification extends PvtPreTrainedModel { /** * @param {any} model_inputs */ async _call(model_inputs) { return new SequenceClassifierOutput(await super._call(model_inputs)); } } ////////////////////////////////////////////////// ////////////////////////////////////////////////// export class ViTMAEPreTrainedModel extends PreTrainedModel { } export class ViTMAEModel extends ViTMAEPreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// export class ViTMSNPreTrainedModel extends PreTrainedModel { } export class ViTMSNModel extends ViTMSNPreTrainedModel { } export class ViTMSNForImageClassification extends ViTMSNPreTrainedModel { /** * @param {any} model_inputs */ async _call(model_inputs) { return new SequenceClassifierOutput(await super._call(model_inputs)); } } ////////////////////////////////////////////////// ////////////////////////////////////////////////// export class GroupViTPreTrainedModel extends PreTrainedModel { } export class GroupViTModel extends GroupViTPreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// export class FastViTPreTrainedModel extends PreTrainedModel { } export class FastViTModel extends FastViTPreTrainedModel { } export class FastViTForImageClassification extends FastViTPreTrainedModel { /** * @param {any} model_inputs */ async _call(model_inputs) { return new SequenceClassifierOutput(await super._call(model_inputs)); } } ////////////////////////////////////////////////// ////////////////////////////////////////////////// export class VitMattePreTrainedModel extends PreTrainedModel { } /** * ViTMatte framework leveraging any vision backbone e.g. for ADE20k, CityScapes. * * **Example:** Perform image matting with a `VitMatteForImageMatting` model. * ```javascript * import { AutoProcessor, VitMatteForImageMatting, RawImage } from '@huggingface/transformers'; * * // Load processor and model * const processor = await AutoProcessor.from_pretrained('Xenova/vitmatte-small-distinctions-646'); * const model = await VitMatteForImageMatting.from_pretrained('Xenova/vitmatte-small-distinctions-646'); * * // Load image and trimap * const image = await RawImage.fromURL('https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/vitmatte_image.png'); * const trimap = await RawImage.fromURL('https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/vitmatte_trimap.png'); * * // Prepare image + trimap for the model * const inputs = await processor(image, trimap); * * // Predict alpha matte * const { alphas } = await model(inputs); * // Tensor { * // dims: [ 1, 1, 640, 960 ], * // type: 'float32', * // size: 614400, * // data: Float32Array(614400) [ 0.9894027709960938, 0.9970508813858032, ... 
] * // } * ``` * * You can visualize the alpha matte as follows: * ```javascript * import { Tensor, cat } from '@huggingface/transformers'; * * // Visualize predicted alpha matte * const imageTensor = image.toTensor(); * * // Convert float (0-1) alpha matte to uint8 (0-255) * const alphaChannel = alphas * .squeeze(0) * .mul_(255) * .clamp_(0, 255) * .round_() * .to('uint8'); * * // Concatenate original image with predicted alpha * const imageData = cat([imageTensor, alphaChannel], 0); * * // Save output image * const outputImage = RawImage.fromTensor(imageData); * outputImage.save('output.png'); * ``` */ export class VitMatteForImageMatting extends VitMattePreTrainedModel { /** * @param {any} model_inputs */ async _call(model_inputs) { return new ImageMattingOutput(await super._call(model_inputs)); } } ////////////////////////////////////////////////// ////////////////////////////////////////////////// export class MobileViTPreTrainedModel extends PreTrainedModel { } export class MobileViTModel extends MobileViTPreTrainedModel { } export class MobileViTForImageClassification extends MobileViTPreTrainedModel { /** * @param {any} model_inputs */ async _call(model_inputs) { return new SequenceClassifierOutput(await super._call(model_inputs)); } } // TODO: MobileViTForSemanticSegmentation ////////////////////////////////////////////////// ////////////////////////////////////////////////// export class MobileViTV2PreTrainedModel extends PreTrainedModel { } export class MobileViTV2Model extends MobileViTV2PreTrainedModel { } export class MobileViTV2ForImageClassification extends MobileViTV2PreTrainedModel { /** * @param {any} model_inputs */ async _call(model_inputs) { return new SequenceClassifierOutput(await super._call(model_inputs)); } } // TODO: MobileViTV2ForSemanticSegmentation ////////////////////////////////////////////////// ////////////////////////////////////////////////// export class OwlViTPreTrainedModel extends PreTrainedModel { } export class OwlViTModel extends OwlViTPreTrainedModel { } export class OwlViTForObjectDetection extends OwlViTPreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// export class Owlv2PreTrainedModel extends PreTrainedModel { } export class Owlv2Model extends Owlv2PreTrainedModel { } export class Owlv2ForObjectDetection extends Owlv2PreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // Beit Models export class BeitPreTrainedModel extends PreTrainedModel { } export class BeitModel extends BeitPreTrainedModel { } export class BeitForImageClassification extends BeitPreTrainedModel { /** * @param {any} model_inputs */ async _call(model_inputs) { return new SequenceClassifierOutput(await super._call(model_inputs)); } } ////////////////////////////////////////////////// ////////////////////////////////////////////////// export class DetrPreTrainedModel extends PreTrainedModel { } export class DetrModel extends DetrPreTrainedModel { } export class DetrForObjectDetection extends DetrPreTrainedModel { /** * @param {any} model_inputs */ async _call(model_inputs) { return new DetrObjectDetectionOutput(await super._call(model_inputs)); } } export class DetrForSegmentation extends DetrPreTrainedModel { /** * Runs the model with the provided inputs * @param {Object} model_inputs Model inputs * @returns {Promise<DetrSegmentationOutput>} Object containing segmentation outputs */ async _call(model_inputs) { return new 
DetrSegmentationOutput(await super._call(model_inputs)); } } export class DetrObjectDetectionOutput extends ModelOutput { /** * @param {Object} output The output of the model. * @param {Tensor} output.logits Classification logits (including no-object) for all queries. * @param {Tensor} output.pred_boxes Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). * These values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding possible padding). */ constructor({ logits, pred_boxes }) { super(); this.logits = logits; this.pred_boxes = pred_boxes; } } export class DetrSegmentationOutput extends ModelOutput { /** * @param {Object} output The output of the model. * @param {Tensor} output.logits The output logits of the model. * @param {Tensor} output.pred_boxes Predicted boxes. * @param {Tensor} output.pred_masks Predicted masks. */ constructor({ logits, pred_boxes, pred_masks }) { super(); this.logits = logits; this.pred_boxes = pred_boxes; this.pred_masks = pred_masks; } } ////////////////////////////////////////////////// ////////////////////////////////////////////////// export class RTDetrPreTrainedModel extends PreTrainedModel { } export class RTDetrModel extends RTDetrPreTrainedModel { } export class RTDetrForObjectDetection extends RTDetrPreTrainedModel { /** * @param {any} model_inputs */ async _call(model_inputs) { return new RTDetrObjectDetectionOutput(await super._call(model_inputs)); } } export class RTDetrObjectDetectionOutput extends ModelOutput { /** * @param {Object} output The output of the model. * @param {Tensor} output.logits Classification logits (including no-object) for all queries. * @param {Tensor} output.pred_boxes Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). * These values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding possible padding). */ constructor({ logits, pred_boxes }) { super(); this.logits = logits; this.pred_boxes = pred_boxes; } } ////////////////////////////////////////////////// ////////////////////////////////////////////////// export class TableTransformerPreTrainedModel extends PreTrainedModel { } /** * The bare Table Transformer Model (consisting of a backbone and encoder-decoder Transformer) * outputting raw hidden-states without any specific head on top. */ export class TableTransformerModel extends TableTransformerPreTrainedModel { } /** * Table Transformer Model (consisting of a backbone and encoder-decoder Transformer) * with object detection heads on top, for tasks such as COCO detection. 
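 *
 * **Example:** Table detection w/ `TableTransformerForObjectDetection` (a minimal sketch using the high-level `pipeline` API rather than calling the model directly; the checkpoint, image URL, and output values shown are illustrative).
 * ```javascript
 * import { pipeline } from '@huggingface/transformers';
 *
 * // Create an object-detection pipeline backed by Table Transformer
 * const detector = await pipeline('object-detection', 'Xenova/table-transformer-detection');
 *
 * // Detect tables in a document image
 * const url = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/invoice-image.png';
 * const output = await detector(url, { threshold: 0.9 });
 * // [ { score: 0.99, label: 'table', box: { xmin: ..., ymin: ..., xmax: ..., ymax: ... } } ]
 * ```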
*/ export class TableTransformerForObjectDetection extends TableTransformerPreTrainedModel { /** * @param {any} model_inputs */ async _call(model_inputs) { return new TableTransformerObjectDetectionOutput(await super._call(model_inputs)); } } export class TableTransformerObjectDetectionOutput extends DetrObjectDetectionOutput { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// export class DeiTPreTrainedModel extends PreTrainedModel { } export class DeiTModel extends DeiTPreTrainedModel { } export class DeiTForImageClassification extends DeiTPreTrainedModel { /** * @param {any} model_inputs */ async _call(model_inputs) { return new SequenceClassifierOutput(await super._call(model_inputs)); } } ////////////////////////////////////////////////// ////////////////////////////////////////////////// export class HieraPreTrainedModel extends PreTrainedModel { } export class HieraModel extends HieraPreTrainedModel { } export class HieraForImageClassification extends HieraPreTrainedModel { /** * @param {any} model_inputs */ async _call(model_inputs) { return new SequenceClassifierOutput(await super._call(model_inputs)); } } ////////////////////////////////////////////////// ////////////////////////////////////////////////// /** * An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. */ export class ResNetPreTrainedModel extends PreTrainedModel { } /** * The bare ResNet model outputting raw features without any specific head on top. */ export class ResNetModel extends ResNetPreTrainedModel { } /** * ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. */ export class ResNetForImageClassification extends ResNetPreTrainedModel { /** * @param {any} model_inputs */ async _call(model_inputs) { return new SequenceClassifierOutput(await super._call(model_inputs)); } } ////////////////////////////////////////////////// ////////////////////////////////////////////////// export class SwinPreTrainedModel extends PreTrainedModel { } export class SwinModel extends SwinPreTrainedModel { } export class SwinForImageClassification extends SwinPreTrainedModel { /** * @param {any} model_inputs */ async _call(model_inputs) { return new SequenceClassifierOutput(await super._call(model_inputs)); } } ////////////////////////////////////////////////// ////////////////////////////////////////////////// export class Swin2SRPreTrainedModel extends PreTrainedModel { } /** * The bare Swin2SR Model transformer outputting raw hidden-states without any specific head on top. */ export class Swin2SRModel extends Swin2SRPreTrainedModel { } /** * Swin2SR Model transformer with an upsampler head on top for image super resolution and restoration. * * **Example:** Super-resolution w/ `Xenova/swin2SR-classical-sr-x2-64`. 
* * ```javascript * import { AutoProcessor, Swin2SRForImageSuperResolution, RawImage } from '@huggingface/transformers'; * * // Load processor and model * const model_id = 'Xenova/swin2SR-classical-sr-x2-64'; * const processor = await AutoProcessor.from_pretrained(model_id); * const model = await Swin2SRForImageSuperResolution.from_pretrained(model_id); * * // Prepare model inputs * const url = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/butterfly.jpg'; * const image = await RawImage.fromURL(url); * const inputs = await processor(image); * * // Run model * const outputs = await model(inputs); * * // Convert Tensor to RawImage * const output = outputs.reconstruction.squeeze().clamp_(0, 1).mul_(255).round_().to('uint8'); * const outputImage = RawImage.fromTensor(output); * // RawImage { * // data: Uint8Array(786432) [ 41, 31, 24, ... ], * // width: 512, * // height: 512, * // channels: 3 * // } * ``` */ export class Swin2SRForImageSuperResolution extends Swin2SRPreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// export class DPTPreTrainedModel extends PreTrainedModel { } /** * The bare DPT Model transformer outputting raw hidden-states without any specific head on top. */ export class DPTModel extends DPTPreTrainedModel { } /** * DPT Model with a depth estimation head on top (consisting of 3 convolutional layers) e.g. for KITTI, NYUv2. * * **Example:** Depth estimation w/ `Xenova/dpt-hybrid-midas`. * ```javascript * import { DPTForDepthEstimation, AutoProcessor, RawImage, interpolate_4d } from '@huggingface/transformers'; * * // Load model and processor * const model_id = 'Xenova/dpt-hybrid-midas'; * const model = await DPTForDepthEstimation.from_pretrained(model_id); * const processor = await AutoProcessor.from_pretrained(model_id); * * // Load image from URL * const url = 'http://images.cocodataset.org/val2017/000000039769.jpg'; * const image = await RawImage.read(url); * * // Prepare image for the model * const inputs = await processor(image); * * // Run model * const { predicted_depth } = await model(inputs); * * // Interpolate to original size * const prediction = (await interpolate_4d(predicted_depth.unsqueeze(1), { * size: image.size.reverse(), * mode: 'bilinear', * })).squeeze(1); * * // Visualize the prediction * const min = prediction.min().item(); * const max = prediction.max().item(); * const formatted = prediction.sub_(min).div_(max - min).mul_(255).to('uint8'); * const depth = RawImage.fromTensor(formatted); * // RawImage { * // data: Uint8Array(307200) [ 85, 85, 84, ... ], * // width: 640, * // height: 480, * // channels: 1 * // } * ``` */ export class DPTForDepthEstimation extends DPTPreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// export class DepthAnythingPreTrainedModel extends PreTrainedModel { } /** * Depth Anything Model with a depth estimation head on top (consisting of 3 convolutional layers) e.g. for KITTI, NYUv2. 
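 *
 * **Example:** Depth estimation w/ `DepthAnythingForDepthEstimation` (a minimal sketch that mirrors the DPT example above but uses the high-level `pipeline` API; the checkpoint and image URL are illustrative).
 * ```javascript
 * import { pipeline } from '@huggingface/transformers';
 *
 * // Create a depth-estimation pipeline backed by Depth Anything
 * const depth_estimator = await pipeline('depth-estimation', 'Xenova/depth-anything-small-hf');
 *
 * // Predict the depth map for an image
 * const url = 'http://images.cocodataset.org/val2017/000000039769.jpg';
 * const { depth, predicted_depth } = await depth_estimator(url);
 * // `depth` is a RawImage visualization of the depth map; `predicted_depth` is the raw Tensor
 * ```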
 */
export class DepthAnythingForDepthEstimation extends DepthAnythingPreTrainedModel { }
//////////////////////////////////////////////////

//////////////////////////////////////////////////
export class SapiensPreTrainedModel extends PreTrainedModel { }
export class SapiensForSemanticSegmentation extends SapiensPreTrainedModel { }
export class SapiensForDepthEstimation extends SapiensPreTrainedModel { }
export class SapiensForNormalEstimation extends SapiensPreTrainedModel { }
//////////////////////////////////////////////////

//////////////////////////////////////////////////
export class DepthProPreTrainedModel extends PreTrainedModel { }
export class DepthProForDepthEstimation extends DepthProPreTrainedModel { }
//////////////////////////////////////////////////

//////////////////////////////////////////////////
export class MaskFormerPreTrainedModel extends PreTrainedModel { }
export class MaskFormerModel extends MaskFormerPreTrainedModel { }
export class MaskFormerForInstanceSegmentation extends MaskFormerPreTrainedModel { }
//////////////////////////////////////////////////

//////////////////////////////////////////////////
export class GLPNPreTrainedModel extends PreTrainedModel { }

/**
 * The bare GLPN encoder (Mix-Transformer) outputting raw hidden-states without any specific head on top.
 */
export class GLPNModel extends GLPNPreTrainedModel { }

/**
 * GLPN Model transformer with a lightweight depth estimation head on top, e.g. for KITTI, NYUv2.
 *
 * **Example:** Depth estimation w/ `Xenova/glpn-kitti`.
 * ```javascript
 * import { GLPNForDepthEstimation, AutoProcessor, RawImage, interpolate_4d } from '@huggingface/transformers';
 *
 * // Load model and processor
 * const model_id = 'Xenova/glpn-kitti';
 * const model = await GLPNForDepthEstimation.from_pretrained(model_id);
 * const processor = await AutoProcessor.from_pretrained(model_id);
 *
 * // Load image from URL
 * const url = 'http://images.cocodataset.org/val2017/000000039769.jpg';
 * const image = await RawImage.read(url);
 *
 * // Prepare image for the model
 * const inputs = await processor(image);
 *
 * // Run model
 * const { predicted_depth } = await model(inputs);
 *
 * // Interpolate to original size
 * const prediction = (await interpolate_4d(predicted_depth.unsqueeze(1), {
 *   size: image.size.reverse(),
 *   mode: 'bilinear',
 * })).squeeze(1);
 *
 * // Visualize the prediction
 * const min = prediction.min().item();
 * const max = prediction.max().item();
 * const formatted = prediction.sub_(min).div_(max - min).mul_(255).to('uint8');
 * const depth = RawImage.fromTensor(formatted);
 * // RawImage {
 * //   data: Uint8Array(307200) [ 85, 85, 84, ... ],
 * //   width: 640,
 * //   height: 480,
 * //   channels: 1
 * // }
 * ```
 */
export class GLPNForDepthEstimation extends GLPNPreTrainedModel { }
//////////////////////////////////////////////////

//////////////////////////////////////////////////
export class DonutSwinPreTrainedModel extends PreTrainedModel { }

/**
 * The bare Donut Swin Model transformer outputting raw hidden-states without any specific head on top.
 *
 * **Example:** Step-by-step Document Parsing.
* * ```javascript * import { AutoProcessor, AutoTokenizer, AutoModelForVision2Seq, RawImage } from '@huggingface/transformers'; * * // Choose model to use * const model_id = 'Xenova/donut-base-finetuned-cord-v2'; * * // Prepare image inputs * const processor = await AutoProcessor.from_pretrained(model_id); * const url = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/receipt.png'; * const image = await RawImage.read(url); * const image_inputs = await processor(image); * * // Prepare decoder inputs * const tokenizer = await AutoTokenizer.from_pretrained(model_id); * const task_prompt = '<s_cord-v2>'; * const decoder_input_ids = tokenizer(task_prompt, { * add_special_tokens: false, * }).input_ids; * * // Create the model * const model = await AutoModelForVision2Seq.from_pretrained(model_id); * * // Run inference * const output = await model.generate(image_inputs.pixel_values, { * decoder_input_ids, * max_length: model.config.decoder.max_position_embeddings, * }); * * // Decode output * const decoded = tokenizer.batch_decode(output)[0]; * // <s_cord-v2><s_menu><s_nm> CINNAMON SUGAR</s_nm><s_unitprice> 17,000</s_unitprice><s_cnt> 1 x</s_cnt><s_price> 17,000</s_price></s_menu><s_sub_total><s_subtotal_price> 17,000</s_subtotal_price></s_sub_total><s_total><s_total_price> 17,000</s_total_price><s_cashprice> 20,000</s_cashprice><s_changeprice> 3,000</s_changeprice></s_total></s> * ``` * * **Example:** Step-by-step Document Visual Question Answering (DocVQA) * * ```javascript * import { AutoProcessor, AutoTokenizer, AutoModelForVision2Seq, RawImage } from '@huggingface/transformers'; * * // Choose model to use * const model_id = 'Xenova/donut-base-finetuned-docvqa'; * * // Prepare image inputs * const processor = await AutoProcessor.from_pretrained(model_id); * const url = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/invoice.png'; * const image = await RawImage.read(url); * const image_inputs = await processor(image); * * // Prepare decoder inputs * const tokenizer = await AutoTokenizer.from_pretrained(model_id); * const question = 'What is the invoice number?'; * const task_prompt = `<s_docvqa><s_question>${question}</s_question><s_answer>`; * const decoder_input_ids = tokenizer(task_prompt, { * add_special_tokens: false, * }).input_ids; * * // Create the model * const model = await AutoModelForVision2Seq.from_pretrained(model_id); * * // Run inference * const output = await model.generate(image_inputs.pixel_values, { * decoder_input_ids, * max_length: model.config.decoder.max_position_embeddings, * }); * * // Decode output * const decoded = tokenizer.batch_decode(output)[0]; * // <s_docvqa><s_question> What is the invoice number?</s_question><s_answer> us-001</s_answer></s> * ``` */ export class DonutSwinModel extends DonutSwinPreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// export class ConvNextPreTrainedModel extends PreTrainedModel { } /** * The bare ConvNext model outputting raw features without any specific head on top. */ export class ConvNextModel extends ConvNextPreTrainedModel { } /** * ConvNext Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. 
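 *
 * **Example:** Image classification w/ `ConvNextForImageClassification` (a minimal sketch; the checkpoint and image URL are illustrative and may need to be swapped for an available ONNX conversion).
 * ```javascript
 * import { AutoProcessor, ConvNextForImageClassification, RawImage } from '@huggingface/transformers';
 *
 * // Load processor and model
 * const model_id = 'Xenova/convnext-tiny-224';
 * const processor = await AutoProcessor.from_pretrained(model_id);
 * const model = await ConvNextForImageClassification.from_pretrained(model_id);
 *
 * // Read and preprocess the image
 * const url = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/tiger.jpg';
 * const image = await RawImage.read(url);
 * const inputs = await processor(image);
 *
 * // Run the model and pick the highest-scoring class
 * const { logits } = await model(inputs);
 * const scores = logits.data; // Float32Array of per-class scores (batch size 1)
 * const predicted_class_idx = scores.indexOf(Math.max(...scores));
 * console.log(model.config.id2label[predicted_class_idx]);
 * ```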
*/ export class ConvNextForImageClassification extends ConvNextPreTrainedModel { /** * @param {any} model_inputs */ async _call(model_inputs) { return new SequenceClassifierOutput(await super._call(model_inputs)); } } ////////////////////////////////////////////////// ////////////////////////////////////////////////// export class ConvNextV2PreTrainedModel extends PreTrainedModel { } /** * The bare ConvNextV2 model outputting raw features without any specific head on top. */ export class ConvNextV2Model extends ConvNextV2PreTrainedModel { } /** * ConvNextV2 Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. */ export class ConvNextV2ForImageClassification extends ConvNextV2PreTrainedModel { /** * @param {any} model_inputs */ async _call(model_inputs) { return new SequenceClassifierOutput(await super._call(model_inputs)); } } ////////////////////////////////////////////////// ////////////////////////////////////////////////// export class Dinov2PreTrainedModel extends PreTrainedModel { } /** * The bare DINOv2 Model transformer outputting raw hidden-states without any specific head on top. */ export class Dinov2Model extends Dinov2PreTrainedModel { } /** * Dinov2 Model transformer with an image classification head on top (a linear layer on top of the final hidden state of the [CLS] token) e.g. for ImageNet. */ export class Dinov2ForImageClassification extends Dinov2PreTrainedModel { /** * @param {any} model_inputs */ async _call(model_inputs) { return new SequenceClassifierOutput(await super._call(model_inputs)); } } ////////////////////////////////////////////////// ////////////////////////////////////////////////// export class Dinov2WithRegistersPreTrainedModel extends PreTrainedModel { } /** * The bare Dinov2WithRegisters Model transformer outputting raw hidden-states without any specific head on top. */ export class Dinov2WithRegistersModel extends Dinov2WithRegistersPreTrainedModel { } /** * Dinov2WithRegisters Model transformer with an image classification head on top (a linear layer on top of the final hidden state of the [CLS] token) e.g. for ImageNet. */ export class Dinov2WithRegistersForImageClassification extends Dinov2WithRegistersPreTrainedModel { /** * @param {any} model_inputs */ async _call(model_inputs) { return new SequenceClassifierOutput(await super._call(model_inputs)); } } ////////////////////////////////////////////////// export class GroundingDinoPreTrainedModel extends PreTrainedModel { } export class GroundingDinoForObjectDetection extends GroundingDinoPreTrainedModel { } ////////////////////////////////////////////////// export class YolosPreTrainedModel extends PreTrainedModel { } export class YolosModel extends YolosPreTrainedModel { } export class YolosForObjectDetection extends YolosPreTrainedModel { /** * @param {any} model_inputs */ async _call(model_inputs) { return new YolosObjectDetectionOutput(await super._call(model_inputs)); } } export class YolosObjectDetectionOutput extends ModelOutput { /** * @param {Object} output The output of the model. * @param {Tensor} output.logits Classification logits (including no-object) for all queries. * @param {Tensor} output.pred_boxes Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). * These values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding possible padding). 
*/ constructor({ logits, pred_boxes }) { super(); this.logits = logits; this.pred_boxes = pred_boxes; } } ////////////////////////////////////////////////// ////////////////////////////////////////////////// export class SamPreTrainedModel extends PreTrainedModel { } /** * Segment Anything Model (SAM) for generating segmentation masks, given an input image * and optional 2D location and bounding boxes. * * **Example:** Perform mask generation w/ `Xenova/sam-vit-base`. * ```javascript * import { SamModel, AutoProcessor, RawImage } from '@huggingface/transformers'; * * const model = await SamModel.from_pretrained('Xenova/sam-vit-base'); * const processor = await AutoProcessor.from_pretrained('Xenova/sam-vit-base'); * * const img_url = 'https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png'; * const raw_image = await RawImage.read(img_url); * const input_points = [[[450, 600]]] // 2D localization of a window * * const inputs = await processor(raw_image, { input_points }); * const outputs = await model(inputs); * * const masks = await processor.post_process_masks(outputs.pred_masks, inputs.original_sizes, inputs.reshaped_input_sizes); * // [ * // Tensor { * // dims: [ 1, 3, 1764, 2646 ], * // type: 'bool', * // data: Uint8Array(14002632) [ ... ], * // size: 14002632 * // } * // ] * const scores = outputs.iou_scores; * // Tensor { * // dims: [ 1, 1, 3 ], * // type: 'float32', * // data: Float32Array(3) [ * // 0.8892380595207214, * // 0.9311248064041138, * // 0.983696699142456 * // ], * // size: 3 * // } * ``` */ export class SamModel extends SamPreTrainedModel { /** * Compute image embeddings and positional image embeddings, given the pixel values of an image. * @param {Object} model_inputs Object containing the model inputs. * @param {Tensor} model_inputs.pixel_values Pixel values obtained using a `SamProcessor`. * @returns {Promise<{ image_embeddings: Tensor, image_positional_embeddings: Tensor }>} The image embeddings and positional image embeddings. */ async get_image_embeddings({ pixel_values }) { // in: // - pixel_values: tensor.float32[batch_size,3,1024,1024] // // out: // - image_embeddings: tensor.float32[batch_size,256,64,64] // - image_positional_embeddings: tensor.float32[batch_size,256,64,64] return await encoderForward(this, { pixel_values }) } /** * @typedef {Object} SamModelInputs Object containing the model inputs. * @property {Tensor} pixel_values Pixel values as a Tensor with shape `(batch_size, num_channels, height, width)`. * These can be obtained using a `SamProcessor`. * @property {Tensor} [input_points] Input 2D spatial points with shape `(batch_size, num_points, 2)`. * This is used by the prompt encoder to encode the prompt. * @property {Tensor} [input_labels] Input labels for the points, as a Tensor of shape `(batch_size, point_batch_size, num_points)`. * This is used by the prompt encoder to encode the prompt. There are 4 types of labels: * - `1`: the point is a point that contains the object of interest * - `0`: the point is a point that does not contain the object of interest * - `-1`: the point corresponds to the background * - `-10`: the point is a padding point, thus should be ignored by the prompt encoder * @property {Tensor} [input_boxes] Input bounding boxes with shape `(batch_size, num_boxes, 4)`. * @property {Tensor} [image_embeddings] Image embeddings used by the mask decoder. * @property {Tensor} [image_positional_embeddings] Image positional embeddings used by the mask decoder. 
*/ /** * @param {SamModelInputs} model_inputs Object containing the model inputs. * @returns {Promise<Object>} The output of the model. */ async forward(model_inputs) { if (!model_inputs.image_embeddings || !model_inputs.image_positional_embeddings) { // Compute the image embeddings if they are missing model_inputs = { ...model_inputs, ...(await this.get_image_embeddings(model_inputs)) } } if (!model_inputs.input_labels && model_inputs.input_points) { // Set default input labels if they are missing const shape = model_inputs.input_points.dims.slice(0, -1); const numElements = shape.reduce((a, b) => a * b, 1); model_inputs.input_labels = new Tensor( 'int64', new BigInt64Array(numElements).fill(1n), shape ); } const decoder_inputs = { image_embeddings: model_inputs.image_embeddings, image_positional_embeddings: model_inputs.image_positional_embeddings, }; if (model_inputs.input_points) { decoder_inputs.input_points = model_inputs.input_points; } if (model_inputs.input_labels) { decoder_inputs.input_labels = model_inputs.input_labels; } if (model_inputs.input_boxes) { decoder_inputs.input_boxes = model_inputs.input_boxes; } // Returns: // - iou_scores: tensor.float32[batch_size,point_batch_size,3] // - pred_masks: tensor.float32[batch_size,point_batch_size,3,256,256] return await sessionRun(this.sessions['prompt_encoder_mask_decoder'], decoder_inputs); } /** * Runs the model with the provided inputs * @param {Object} model_inputs Model inputs * @returns {Promise<SamImageSegmentationOutput>} Object containing segmentation outputs */ async _call(model_inputs) { return new SamImageSegmentationOutput(await super._call(model_inputs)); } } /** * Base class for Segment-Anything model's output. */ export class SamImageSegmentationOutput extends ModelOutput { /** * @param {Object} output The output of the model. * @param {Tensor} output.iou_scores The output logits of the model. * @param {Tensor} output.pred_masks Predicted boxes. */ constructor({ iou_scores, pred_masks }) { super(); this.iou_scores = iou_scores; this.pred_masks = pred_masks; } } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // MarianMT models export class MarianPreTrainedModel extends PreTrainedModel { }; export class MarianModel extends MarianPreTrainedModel { } export class MarianMTModel extends MarianPreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // M2M100 models export class M2M100PreTrainedModel extends PreTrainedModel { }; export class M2M100Model extends M2M100PreTrainedModel { } export class M2M100ForConditionalGeneration extends M2M100PreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // Wav2Vec2 models export class Wav2Vec2PreTrainedModel extends PreTrainedModel { }; /** * The bare Wav2Vec2 Model transformer outputting raw hidden-states without any specific head on top. * * **Example:** Load and run a `Wav2Vec2Model` for feature extraction. 
* * ```javascript * import { AutoProcessor, AutoModel, read_audio } from '@huggingface/transformers'; * * // Read and preprocess audio * const processor = await AutoProcessor.from_pretrained('Xenova/mms-300m'); * const audio = await read_audio('https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac', 16000); * const inputs = await processor(audio); * * // Run model with inputs * const model = await AutoModel.from_pretrained('Xenova/mms-300m'); * const output = await model(inputs); * // { * // last_hidden_state: Tensor { * // dims: [ 1, 1144, 1024 ], * // type: 'float32', * // data: Float32Array(1171456) [ ... ], * // size: 1171456 * // } * // } * ``` */ export class Wav2Vec2Model extends Wav2Vec2PreTrainedModel { } export class Wav2Vec2ForCTC extends Wav2Vec2PreTrainedModel { /** * @param {Object} model_inputs * @param {Tensor} model_inputs.input_values Float values of input raw speech waveform. * @param {Tensor} model_inputs.attention_mask Mask to avoid performing convolution and attention on padding token indices. Mask values selected in [0, 1] */ async _call(model_inputs) { return new CausalLMOutput(await super._call(model_inputs)); } } export class Wav2Vec2ForSequenceClassification extends Wav2Vec2PreTrainedModel { /** * Calls the model on new inputs. * @param {Object} model_inputs The inputs to the model. * @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification. */ async _call(model_inputs) { return new SequenceClassifierOutput(await super._call(model_inputs)); } } /** * Wav2Vec2 Model with a frame classification head on top for tasks like Speaker Diarization. */ export class Wav2Vec2ForAudioFrameClassification extends Wav2Vec2PreTrainedModel { /** * Calls the model on new inputs. * @param {Object} model_inputs The inputs to the model. * @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for sequence classification. */ async _call(model_inputs) { return new TokenClassifierOutput(await super._call(model_inputs)); } } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // PyAnnote models export class PyAnnotePreTrainedModel extends PreTrainedModel { }; /** * The bare PyAnnote Model transformer outputting raw hidden-states without any specific head on top. */ export class PyAnnoteModel extends PyAnnotePreTrainedModel { } /** * PyAnnote Model with a frame classification head on top for tasks like Speaker Diarization. * * **Example:** Load and run a `PyAnnoteForAudioFrameClassification` for speaker diarization. * * ```javascript * import { AutoProcessor, AutoModelForAudioFrameClassification, read_audio } from '@huggingface/transformers'; * * // Load model and processor * const model_id = 'onnx-community/pyannote-segmentation-3.0'; * const model = await AutoModelForAudioFrameClassification.from_pretrained(model_id); * const processor = await AutoProcessor.from_pretrained(model_id); * * // Read and preprocess audio * const url = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/mlk.wav'; * const audio = await read_audio(url, processor.feature_extractor.config.sampling_rate); * const inputs = await processor(audio); * * // Run model with inputs * const { logits } = await model(inputs); * // { * // logits: Tensor { * // dims: [ 1, 767, 7 ], // [batch_size, num_frames, num_classes] * // type: 'float32', * // data: Float32Array(5369) [ ... 
], * // size: 5369 * // } * // } * * const result = processor.post_process_speaker_diarization(logits, audio.length); * // [ * // [ * // { id: 0, start: 0, end: 1.0512535626298245, confidence: 0.8220156481664611 }, * // { id: 2, start: 1.0512535626298245, end: 2.3398869619825127, confidence: 0.9008811707860472 }, * // ... * // ] * // ] * * // Display result * console.table(result[0], ['start', 'end', 'id', 'confidence']); * // ┌─────────┬────────────────────┬────────────────────┬────┬─────────────────────┐ * // │ (index) │ start │ end │ id │ confidence │ * // ├─────────┼────────────────────┼────────────────────┼────┼─────────────────────┤ * // │ 0 │ 0 │ 1.0512535626298245 │ 0 │ 0.8220156481664611 │ * // │ 1 │ 1.0512535626298245 │ 2.3398869619825127 │ 2 │ 0.9008811707860472 │ * // │ 2 │ 2.3398869619825127 │ 3.5946089560890773 │ 0 │ 0.7521651315796233 │ * // │ 3 │ 3.5946089560890773 │ 4.578039708226655 │ 2 │ 0.8491978128022479 │ * // │ 4 │ 4.578039708226655 │ 4.594995410849717 │ 0 │ 0.2935352600416393 │ * // │ 5 │ 4.594995410849717 │ 6.121008646925269 │ 3 │ 0.6788051309866024 │ * // │ 6 │ 6.121008646925269 │ 6.256654267909762 │ 0 │ 0.37125512393851134 │ * // │ 7 │ 6.256654267909762 │ 8.630452635138397 │ 2 │ 0.7467035186353542 │ * // │ 8 │ 8.630452635138397 │ 10.088643060721703 │ 0 │ 0.7689364814666032 │ * // │ 9 │ 10.088643060721703 │ 12.58113134631177 │ 2 │ 0.9123324509131324 │ * // │ 10 │ 12.58113134631177 │ 13.005023911888312 │ 0 │ 0.4828358177572041 │ * // └─────────┴────────────────────┴────────────────────┴────┴─────────────────────┘ * ``` */ export class PyAnnoteForAudioFrameClassification extends PyAnnotePreTrainedModel { /** * Calls the model on new inputs. * @param {Object} model_inputs The inputs to the model. * @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for sequence classification. */ async _call(model_inputs) { return new TokenClassifierOutput(await super._call(model_inputs)); } } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // WeSpeakerResNet models export class WeSpeakerResNetPreTrainedModel extends PreTrainedModel { }; export class WeSpeakerResNetModel extends WeSpeakerResNetPreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // UniSpeech models export class UniSpeechPreTrainedModel extends PreTrainedModel { }; /** * The bare UniSpeech Model transformer outputting raw hidden-states without any specific head on top. */ export class UniSpeechModel extends UniSpeechPreTrainedModel { } /** * UniSpeech Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC). */ export class UniSpeechForCTC extends UniSpeechPreTrainedModel { /** * @param {Object} model_inputs * @param {Tensor} model_inputs.input_values Float values of input raw speech waveform. * @param {Tensor} model_inputs.attention_mask Mask to avoid performing convolution and attention on padding token indices. Mask values selected in [0, 1] */ async _call(model_inputs) { return new CausalLMOutput(await super._call(model_inputs)); } } /** * UniSpeech Model with a sequence classification head on top (a linear layer over the pooled output). */ export class UniSpeechForSequenceClassification extends UniSpeechPreTrainedModel { /** * Calls the model on new inputs. * @param {Object} model_inputs The inputs to the model. 
* @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification. */ async _call(model_inputs) { return new SequenceClassifierOutput(await super._call(model_inputs)); } } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // UniSpeechSat models export class UniSpeechSatPreTrainedModel extends PreTrainedModel { }; /** * The bare UniSpeechSat Model transformer outputting raw hidden-states without any specific head on top. */ export class UniSpeechSatModel extends UniSpeechSatPreTrainedModel { } /** * UniSpeechSat Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC). */ export class UniSpeechSatForCTC extends UniSpeechSatPreTrainedModel { /** * @param {Object} model_inputs * @param {Tensor} model_inputs.input_values Float values of input raw speech waveform. * @param {Tensor} model_inputs.attention_mask Mask to avoid performing convolution and attention on padding token indices. Mask values selected in [0, 1] */ async _call(model_inputs) { return new CausalLMOutput(await super._call(model_inputs)); } } /** * UniSpeechSat Model with a sequence classification head on top (a linear layer over the pooled output). */ export class UniSpeechSatForSequenceClassification extends UniSpeechSatPreTrainedModel { /** * Calls the model on new inputs. * @param {Object} model_inputs The inputs to the model. * @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification. */ async _call(model_inputs) { return new SequenceClassifierOutput(await super._call(model_inputs)); } } /** * UniSpeechSat Model with a frame classification head on top for tasks like Speaker Diarization. */ export class UniSpeechSatForAudioFrameClassification extends UniSpeechSatPreTrainedModel { /** * Calls the model on new inputs. * @param {Object} model_inputs The inputs to the model. * @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for sequence classification. */ async _call(model_inputs) { return new TokenClassifierOutput(await super._call(model_inputs)); } } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // Wav2Vec2Bert models export class Wav2Vec2BertPreTrainedModel extends PreTrainedModel { }; /** * The bare Wav2Vec2Bert Model transformer outputting raw hidden-states without any specific head on top. */ export class Wav2Vec2BertModel extends Wav2Vec2BertPreTrainedModel { } /** * Wav2Vec2Bert Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC). */ export class Wav2Vec2BertForCTC extends Wav2Vec2BertPreTrainedModel { /** * @param {Object} model_inputs * @param {Tensor} model_inputs.input_features Float values of input mel-spectrogram. * @param {Tensor} model_inputs.attention_mask Mask to avoid performing convolution and attention on padding token indices. Mask values selected in [0, 1] */ async _call(model_inputs) { return new CausalLMOutput(await super._call(model_inputs)); } } /** * Wav2Vec2Bert Model with a sequence classification head on top (a linear layer over the pooled output). */ export class Wav2Vec2BertForSequenceClassification extends Wav2Vec2BertPreTrainedModel { /** * Calls the model on new inputs. * @param {Object} model_inputs The inputs to the model. * @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification. 
*/ async _call(model_inputs) { return new SequenceClassifierOutput(await super._call(model_inputs)); } } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // Hubert models export class HubertPreTrainedModel extends PreTrainedModel { } /** * The bare Hubert Model transformer outputting raw hidden-states without any specific head on top. * * **Example:** Load and run a `HubertModel` for feature extraction. * * ```javascript * import { AutoProcessor, AutoModel, read_audio } from '@huggingface/transformers'; * * // Read and preprocess audio * const processor = await AutoProcessor.from_pretrained('Xenova/hubert-base-ls960'); * const audio = await read_audio('https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/jfk.wav', 16000); * const inputs = await processor(audio); * * // Load and run model with inputs * const model = await AutoModel.from_pretrained('Xenova/hubert-base-ls960'); * const output = await model(inputs); * // { * // last_hidden_state: Tensor { * // dims: [ 1, 549, 768 ], * // type: 'float32', * // data: Float32Array(421632) [0.0682469978928566, 0.08104046434164047, -0.4975186586380005, ...], * // size: 421632 * // } * // } * ``` */ export class HubertModel extends Wav2Vec2PreTrainedModel { } /** * Hubert Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC). */ export class HubertForCTC extends Wav2Vec2PreTrainedModel { /** * @param {Object} model_inputs * @param {Tensor} model_inputs.input_values Float values of input raw speech waveform. * @param {Tensor} model_inputs.attention_mask Mask to avoid performing convolution and attention on padding token indices. Mask values selected in [0, 1] */ async _call(model_inputs) { return new CausalLMOutput(await super._call(model_inputs)); } } /** * Hubert Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like SUPERB Keyword Spotting. */ export class HubertForSequenceClassification extends Wav2Vec2PreTrainedModel { /** * Calls the model on new inputs. * @param {Object} model_inputs The inputs to the model. * @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification. */ async _call(model_inputs) { return new SequenceClassifierOutput(await super._call(model_inputs)); } } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // WavLM models /** * An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. */ export class WavLMPreTrainedModel extends PreTrainedModel { }; /** * The bare WavLM Model transformer outputting raw hidden-states without any specific head on top. * * **Example:** Load and run a `WavLMModel` for feature extraction. 
* * ```javascript * import { AutoProcessor, AutoModel, read_audio } from '@huggingface/transformers'; * * // Read and preprocess audio * const processor = await AutoProcessor.from_pretrained('Xenova/wavlm-base'); * const audio = await read_audio('https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/jfk.wav', 16000); * const inputs = await processor(audio); * * // Run model with inputs * const model = await AutoModel.from_pretrained('Xenova/wavlm-base'); * const output = await model(inputs); * // { * // last_hidden_state: Tensor { * // dims: [ 1, 549, 768 ], * // type: 'float32', * // data: Float32Array(421632) [-0.349443256855011, -0.39341306686401367, 0.022836603224277496, ...], * // size: 421632 * // } * // } * ``` */ export class WavLMModel extends WavLMPreTrainedModel { } /** * WavLM Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC). */ export class WavLMForCTC extends WavLMPreTrainedModel { /** * @param {Object} model_inputs * @param {Tensor} model_inputs.input_values Float values of input raw speech waveform. * @param {Tensor} model_inputs.attention_mask Mask to avoid performing convolution and attention on padding token indices. Mask values selected in [0, 1] */ async _call(model_inputs) { return new CausalLMOutput(await super._call(model_inputs)); } } /** * WavLM Model with a sequence classification head on top (a linear layer over the pooled output). */ export class WavLMForSequenceClassification extends WavLMPreTrainedModel { /** * Calls the model on new inputs. * @param {Object} model_inputs The inputs to the model. * @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification. */ async _call(model_inputs) { return new SequenceClassifierOutput(await super._call(model_inputs)); } } /** * WavLM Model with an XVector feature extraction head on top for tasks like Speaker Verification. * * **Example:** Extract speaker embeddings with `WavLMForXVector`. * ```javascript * import { AutoProcessor, AutoModel, read_audio } from '@huggingface/transformers'; * * // Read and preprocess audio * const processor = await AutoProcessor.from_pretrained('Xenova/wavlm-base-plus-sv'); * const url = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/jfk.wav'; * const audio = await read_audio(url, 16000); * const inputs = await processor(audio); * * // Run model with inputs * const model = await AutoModel.from_pretrained('Xenova/wavlm-base-plus-sv'); * const outputs = await model(inputs); * // { * // logits: Tensor { * // dims: [ 1, 512 ], * // type: 'float32', * // data: Float32Array(512) [0.5847219228744507, ...], * // size: 512 * // }, * // embeddings: Tensor { * // dims: [ 1, 512 ], * // type: 'float32', * // data: Float32Array(512) [-0.09079201519489288, ...], * // size: 512 * // } * // } * ``` */ export class WavLMForXVector extends WavLMPreTrainedModel { /** * Calls the model on new inputs. * @param {Object} model_inputs The inputs to the model. * @returns {Promise<XVectorOutput>} An object containing the model's output logits and speaker embeddings. */ async _call(model_inputs) { return new XVectorOutput(await super._call(model_inputs)); } } /** * WavLM Model with a frame classification head on top for tasks like Speaker Diarization. * * **Example:** Perform speaker diarization with `WavLMForAudioFrameClassification`. 
 * ```javascript
 * import { AutoProcessor, AutoModelForAudioFrameClassification, read_audio } from '@huggingface/transformers';
 *
 * // Read and preprocess audio
 * const processor = await AutoProcessor.from_pretrained('Xenova/wavlm-base-plus-sd');
 * const url = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/jfk.wav';
 * const audio = await read_audio(url, 16000);
 * const inputs = await processor(audio);
 *
 * // Run model with inputs
 * const model = await AutoModelForAudioFrameClassification.from_pretrained('Xenova/wavlm-base-plus-sd');
 * const { logits } = await model(inputs);
 * // {
 * //   logits: Tensor {
 * //     dims: [ 1, 549, 2 ],  // [batch_size, num_frames, num_speakers]
 * //     type: 'float32',
 * //     data: Float32Array(1098) [-3.5301010608673096, ...],
 * //     size: 1098
 * //   }
 * // }
 *
 * const labels = logits[0].sigmoid().tolist().map(
 *     frames => frames.map(speaker => speaker > 0.5 ? 1 : 0)
 * );
 * console.log(labels); // labels is a one-hot array of shape (num_frames, num_speakers)
 * // [
 * //     [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0],
 * //     [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0],
 * //     [0, 0], [0, 1], [0, 1], [0, 1], [0, 1], [0, 1],
 * //     ...
 * // ]
 * ```
 */
export class WavLMForAudioFrameClassification extends WavLMPreTrainedModel {
    /**
     * Calls the model on new inputs.
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for sequence classification.
     */
    async _call(model_inputs) {
        return new TokenClassifierOutput(await super._call(model_inputs));
    }
}

export class StyleTextToSpeech2PreTrainedModel extends PreTrainedModel { }
export class StyleTextToSpeech2Model extends StyleTextToSpeech2PreTrainedModel { }

//////////////////////////////////////////////////
// SpeechT5 models
/**
 * An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models.
 */
export class SpeechT5PreTrainedModel extends PreTrainedModel { };

/**
 * The bare SpeechT5 Encoder-Decoder Model outputting raw hidden-states without any specific pre- or post-nets.
 */
export class SpeechT5Model extends SpeechT5PreTrainedModel { };

/**
 * SpeechT5 Model with a speech encoder and a text decoder.
 *
 * **Example:** Generate speech from text with `SpeechT5ForTextToSpeech` and `SpeechT5HifiGan`.
* ```javascript * import { AutoTokenizer, AutoProcessor, SpeechT5ForTextToSpeech, SpeechT5HifiGan, Tensor } from '@huggingface/transformers'; * * // Load the tokenizer and processor * const tokenizer = await AutoTokenizer.from_pretrained('Xenova/speecht5_tts'); * const processor = await AutoProcessor.from_pretrained('Xenova/speecht5_tts'); * * // Load the models * // NOTE: We use the full-precision versions as they are more accurate * const model = await SpeechT5ForTextToSpeech.from_pretrained('Xenova/speecht5_tts', { dtype: 'fp32' }); * const vocoder = await SpeechT5HifiGan.from_pretrained('Xenova/speecht5_hifigan', { dtype: 'fp32' }); * * // Load speaker embeddings from URL * const speaker_embeddings_data = new Float32Array( * await (await fetch('https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/speaker_embeddings.bin')).arrayBuffer() * ); * const speaker_embeddings = new Tensor( * 'float32', * speaker_embeddings_data, * [1, speaker_embeddings_data.length] * ) * * // Run tokenization * const { input_ids } = tokenizer('Hello, my dog is cute'); * * // Generate waveform * const { waveform } = await model.generate_speech(input_ids, speaker_embeddings, { vocoder }); * console.log(waveform) * // Tensor { * // dims: [ 26112 ], * // type: 'float32', * // size: 26112, * // data: Float32Array(26112) [ -0.00043630177970044315, -0.00018082228780258447, ... ], * // } * ``` */ export class SpeechT5ForSpeechToText extends SpeechT5PreTrainedModel { } /** * SpeechT5 Model with a text encoder and a speech decoder. */ export class SpeechT5ForTextToSpeech extends SpeechT5PreTrainedModel { /** * @typedef {Object} SpeechOutput * @property {Tensor} [spectrogram] The predicted log-mel spectrogram of shape * `(output_sequence_length, config.num_mel_bins)`. Returned when no `vocoder` is provided * @property {Tensor} [waveform] The predicted waveform of shape `(num_frames,)`. Returned when a `vocoder` is provided. * @property {Tensor} [cross_attentions] The outputs of the decoder's cross-attention layers of shape * `(config.decoder_layers, config.decoder_attention_heads, output_sequence_length, input_sequence_length)`. returned when `output_cross_attentions` is `true`. */ /** * Converts a sequence of input tokens into a sequence of mel spectrograms, which are subsequently turned into a speech waveform using a vocoder. * @param {Tensor} input_values Indices of input sequence tokens in the vocabulary. * @param {Tensor} speaker_embeddings Tensor containing the speaker embeddings. * @param {Object} options Optional parameters for generating speech. * @param {number} [options.threshold=0.5] The generated sequence ends when the predicted stop token probability exceeds this value. * @param {number} [options.minlenratio=0.0] Used to calculate the minimum required length for the output sequence. * @param {number} [options.maxlenratio=20.0] Used to calculate the maximum allowed length for the output sequence. * @param {Object} [options.vocoder=null] The vocoder that converts the mel spectrogram into a speech waveform. If `null`, the output is the mel spectrogram. * @param {boolean} [options.output_cross_attentions=false] Whether or not to return the attentions tensors of the decoder's cross-attention layers. * @returns {Promise<SpeechOutput>} A promise which resolves to an object containing the spectrogram, waveform, and cross-attention tensors. 
*/ async generate_speech(input_values, speaker_embeddings, { threshold = 0.5, minlenratio = 0.0, maxlenratio = 20.0, vocoder = null, // output_cross_attentions = false, // TODO add } = {}) { const model_inputs = { input_ids: input_values } const { encoder_outputs, encoder_attention_mask } = await encoderForward(this, model_inputs); // @ts-expect-error TS2339 const r = encoder_outputs.dims[1] / this.config.reduction_factor; const maxlen = Math.floor(r * maxlenratio); const minlen = Math.floor(r * minlenratio); // @ts-expect-error TS2339 const num_mel_bins = this.config.num_mel_bins; let spectrogramParts = []; let past_key_values = null; let decoder_outputs = null; let idx = 0; while (true) { ++idx; const use_cache_branch = boolTensor(!!decoder_outputs); let output_sequence; if (decoder_outputs) { output_sequence = decoder_outputs.output_sequence_out; } else { output_sequence = new Tensor( 'float32', new Float32Array(num_mel_bins), [1, 1, num_mel_bins], ) } let decoderFeeds = { use_cache_branch, output_sequence, encoder_attention_mask: encoder_attention_mask, speaker_embeddings: speaker_embeddings, encoder_hidden_states: encoder_outputs, }; this.addPastKeyValues(decoderFeeds, past_key_values); decoder_outputs = await sessionRun(this.sessions['decoder_model_merged'], decoderFeeds); past_key_values = this.getPastKeyValues(decoder_outputs, past_key_values); const { prob, spectrum } = decoder_outputs; spectrogramParts.push(spectrum); if (idx >= minlen && ( // Finished when stop token or maximum length is reached. Array.from(prob.data).filter(p => p >= threshold).length > 0 || idx >= maxlen )) { break; } } const spectrogram = cat(spectrogramParts); const { waveform } = await sessionRun(vocoder.sessions['model'], { spectrogram }); return { spectrogram, waveform, // cross_attentions: null, // TODO add } } } /** * HiFi-GAN vocoder. * * See [SpeechT5ForSpeechToText](./models#module_models.SpeechT5ForSpeechToText) for example usage. */ export class SpeechT5HifiGan extends PreTrainedModel { main_input_name = 'spectrogram'; } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // TrOCR models export class TrOCRPreTrainedModel extends PreTrainedModel { } /** * The TrOCR Decoder with a language modeling head. */ export class TrOCRForCausalLM extends TrOCRPreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // Mistral models /** * The bare Mistral Model outputting raw hidden-states without any specific head on top. */ export class MistralPreTrainedModel extends PreTrainedModel { } export class MistralModel extends MistralPreTrainedModel { } export class MistralForCausalLM extends MistralPreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // Starcoder2 models /** * The bare Starcoder2 Model outputting raw hidden-states without any specific head on top. */ export class Starcoder2PreTrainedModel extends PreTrainedModel { } export class Starcoder2Model extends Starcoder2PreTrainedModel { } export class Starcoder2ForCausalLM extends Starcoder2PreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // Falcon models /** * The bare Falcon Model outputting raw hidden-states without any specific head on top. 
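 *
 * **Example:** Text generation with a Falcon causal language model (a minimal sketch using the high-level `pipeline` API; `<falcon-onnx-checkpoint>` is a placeholder, so substitute the id of any Falcon checkpoint that has been converted to ONNX).
 * ```javascript
 * import { pipeline } from '@huggingface/transformers';
 *
 * // Create a text-generation pipeline backed by `FalconForCausalLM` (defined below)
 * const generator = await pipeline('text-generation', '<falcon-onnx-checkpoint>');
 *
 * // Generate a continuation of the prompt
 * const output = await generator('Once upon a time,', { max_new_tokens: 30 });
 * // [ { generated_text: 'Once upon a time, ...' } ]
 * ```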
*/ export class FalconPreTrainedModel extends PreTrainedModel { } export class FalconModel extends FalconPreTrainedModel { } export class FalconForCausalLM extends FalconPreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // CLAP models export class ClapPreTrainedModel extends PreTrainedModel { } export class ClapModel extends ClapPreTrainedModel { } /** * CLAP Text Model with a projection layer on top (a linear layer on top of the pooled output). * * **Example:** Compute text embeddings with `ClapTextModelWithProjection`. * * ```javascript * import { AutoTokenizer, ClapTextModelWithProjection } from '@huggingface/transformers'; * * // Load tokenizer and text model * const tokenizer = await AutoTokenizer.from_pretrained('Xenova/clap-htsat-unfused'); * const text_model = await ClapTextModelWithProjection.from_pretrained('Xenova/clap-htsat-unfused'); * * // Run tokenization * const texts = ['a sound of a cat', 'a sound of a dog']; * const text_inputs = tokenizer(texts, { padding: true, truncation: true }); * * // Compute embeddings * const { text_embeds } = await text_model(text_inputs); * // Tensor { * // dims: [ 2, 512 ], * // type: 'float32', * // data: Float32Array(1024) [ ... ], * // size: 1024 * // } * ``` */ export class ClapTextModelWithProjection extends ClapPreTrainedModel { /** @type {typeof PreTrainedModel.from_pretrained} */ static async from_pretrained(pretrained_model_name_or_path, options = {}) { return super.from_pretrained(pretrained_model_name_or_path, { ...options, // Update default model file name if not provided model_file_name: options.model_file_name ?? 'text_model', }); } } /** * CLAP Audio Model with a projection layer on top (a linear layer on top of the pooled output). * * **Example:** Compute audio embeddings with `ClapAudioModelWithProjection`. * * ```javascript * import { AutoProcessor, ClapAudioModelWithProjection, read_audio } from '@huggingface/transformers'; * * // Load processor and audio model * const processor = await AutoProcessor.from_pretrained('Xenova/clap-htsat-unfused'); * const audio_model = await ClapAudioModelWithProjection.from_pretrained('Xenova/clap-htsat-unfused'); * * // Read audio and run processor * const audio = await read_audio('https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/cat_meow.wav'); * const audio_inputs = await processor(audio); * * // Compute embeddings * const { audio_embeds } = await audio_model(audio_inputs); * // Tensor { * // dims: [ 1, 512 ], * // type: 'float32', * // data: Float32Array(512) [ ... ], * // size: 512 * // } * ``` */ export class ClapAudioModelWithProjection extends ClapPreTrainedModel { /** @type {typeof PreTrainedModel.from_pretrained} */ static async from_pretrained(pretrained_model_name_or_path, options = {}) { return super.from_pretrained(pretrained_model_name_or_path, { ...options, // Update default model file name if not provided model_file_name: options.model_file_name ?? 'audio_model', }); } } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // VITS models export class VitsPreTrainedModel extends PreTrainedModel { } /** * The complete VITS model, for text-to-speech synthesis. * * **Example:** Generate speech from text with `VitsModel`. 
* ```javascript * import { AutoTokenizer, VitsModel } from '@huggingface/transformers'; * * // Load the tokenizer and model * const tokenizer = await AutoTokenizer.from_pretrained('Xenova/mms-tts-eng'); * const model = await VitsModel.from_pretrained('Xenova/mms-tts-eng'); * * // Run tokenization * const inputs = tokenizer('I love transformers'); * * // Generate waveform * const { waveform } = await model(inputs); * // Tensor { * // dims: [ 1, 35328 ], * // type: 'float32', * // data: Float32Array(35328) [ ... ], * // size: 35328, * // } * ``` */ export class VitsModel extends VitsPreTrainedModel { /** * Calls the model on new inputs. * @param {Object} model_inputs The inputs to the model. * @returns {Promise<VitsModelOutput>} The outputs for the VITS model. */ async _call(model_inputs) { return new VitsModelOutput(await super._call(model_inputs)); } } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // Segformer models export class SegformerPreTrainedModel extends PreTrainedModel { } /** * The bare SegFormer encoder (Mix-Transformer) outputting raw hidden-states without any specific head on top. */ export class SegformerModel extends SegformerPreTrainedModel { } /** * SegFormer Model transformer with an image classification head on top (a linear layer on top of the final hidden states) e.g. for ImageNet. */ export class SegformerForImageClassification extends SegformerPreTrainedModel { } /** * SegFormer Model transformer with an all-MLP decode head on top e.g. for ADE20k, CityScapes. */ export class SegformerForSemanticSegmentation extends SegformerPreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // StableLm models export class StableLmPreTrainedModel extends PreTrainedModel { } /** * The bare StableLm Model transformer outputting raw hidden-states without any specific head on top. */ export class StableLmModel extends StableLmPreTrainedModel { } /** * StableLm Model with a `language modeling` head on top for Causal Language Modeling (with past). */ export class StableLmForCausalLM extends StableLmPreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// export class EfficientNetPreTrainedModel extends PreTrainedModel { } /** * The bare EfficientNet model outputting raw features without any specific head on top. */ export class EfficientNetModel extends EfficientNetPreTrainedModel { } /** * EfficientNet Model with an image classification head on top (a linear layer on top of the pooled features). */ export class EfficientNetForImageClassification extends EfficientNetPreTrainedModel { /** * @param {any} model_inputs */ async _call(model_inputs) { return new SequenceClassifierOutput(await super._call(model_inputs)); } } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // Musicgen models export class MusicgenPreTrainedModel extends PreTrainedModel { } /** * The bare Musicgen decoder model outputting raw hidden-states without any specific head on top. */ export class MusicgenModel extends MusicgenPreTrainedModel { } /** * The MusicGen decoder model with a language modelling head on top. */ export class MusicgenForCausalLM extends MusicgenPreTrainedModel { } /** * The composite MusicGen model with a text encoder, audio encoder and Musicgen decoder, * for music generation tasks with one or both of text and audio prompts. 
* * **Example:** Generate music from text with `Xenova/musicgen-small`. * ```javascript * import { AutoTokenizer, MusicgenForConditionalGeneration } from '@huggingface/transformers'; * * // Load tokenizer and model * const tokenizer = await AutoTokenizer.from_pretrained('Xenova/musicgen-small'); * const model = await MusicgenForConditionalGeneration.from_pretrained( * 'Xenova/musicgen-small', { dtype: 'fp32' } * ); * * // Prepare text input * const prompt = '80s pop track with bassy drums and synth'; * const inputs = tokenizer(prompt); * * // Generate audio * const audio_values = await model.generate({ * ...inputs, * max_new_tokens: 512, * do_sample: true, * guidance_scale: 3, * }); * * // (Optional) Write the output to a WAV file * import wavefile from 'wavefile'; * import fs from 'fs'; * * const wav = new wavefile.WaveFile(); * wav.fromScratch(1, model.config.audio_encoder.sampling_rate, '32f', audio_values.data); * fs.writeFileSync('musicgen_out.wav', wav.toBuffer()); * ``` */ export class MusicgenForConditionalGeneration extends PreTrainedModel { // NOTE: not MusicgenPreTrainedModel forward_params = [ 'input_ids', 'attention_mask', 'encoder_outputs', 'decoder_input_ids', 'decoder_attention_mask', 'past_key_values', ]; /** * Apply the pattern mask to the final ids, * then revert the pattern delay mask by filtering the pad token id in a single step. * @param {Tensor} outputs The output tensor from the model. * @returns {Tensor} The filtered output tensor. */ _apply_and_filter_by_delay_pattern_mask(outputs) { const [bs_x_codebooks, seqLength] = outputs.dims; // @ts-expect-error TS2339 const num_codebooks = this.config.decoder.num_codebooks; const upperBound = (seqLength - num_codebooks); let newDataSize = 0; for (let i = 0; i < outputs.size; ++i) { // @ts-expect-error TS2339 if (outputs.data[i] === this.config.decoder.pad_token_id) { continue; } const row = (i % seqLength); const col = Math.floor(i / seqLength) % num_codebooks; const diff = row - col; if (diff > 0 && diff <= upperBound) { outputs.data[newDataSize++] = outputs.data[i]; } } const batch_size = Math.floor(bs_x_codebooks / num_codebooks); const inferred = newDataSize / (batch_size * num_codebooks); // TODO: assert `inferred` is an integer return new Tensor( outputs.type, outputs.data.slice(0, newDataSize), [batch_size, num_codebooks, inferred] ); } prepare_inputs_for_generation(input_ids, model_inputs, generation_config) { // apply the delay pattern mask let clonedInputIds = structuredClone(input_ids); for (let i = 0; i < clonedInputIds.length; ++i) { for (let j = 0; j < clonedInputIds[i].length; ++j) { // @ts-expect-error TS2339 if ((i % this.config.decoder.num_codebooks) >= j) { // @ts-expect-error TS2339 clonedInputIds[i][j] = BigInt(this.config.decoder.pad_token_id); } } } // for classifier free guidance we need to replicate the decoder args across the batch dim // (we'll split these before sampling) if (generation_config.guidance_scale !== null && generation_config.guidance_scale > 1) { // [batch, seqLength] -> [2 * batch, seqLength] clonedInputIds = clonedInputIds.concat(clonedInputIds); } const prepped = super.prepare_inputs_for_generation(clonedInputIds, model_inputs, generation_config); return prepped; } /** * Generates sequences of token ids for models with a language modeling head. * @param {import('./generation/parameters.js').GenerationFunctionParameters} options * @returns {Promise<ModelOutput|Tensor>} The output of the model, which can contain the generated token ids, attentions, and scores. 
*/ async generate(options) { const output_ids = await super.generate(options); // apply the pattern mask to the final ids // tensor: int64[1,batch_size,4,chunk_length] const audio_codes = this._apply_and_filter_by_delay_pattern_mask( /** @type {Tensor} */(output_ids) ).unsqueeze_(0); // append the frame dimension back to the audio codes const { audio_values } = await sessionRun(this.sessions['encodec_decode'], { audio_codes }) return audio_values; } } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // MobileNetV1 models export class MobileNetV1PreTrainedModel extends PreTrainedModel { } /** * The bare MobileNetV1 model outputting raw hidden-states without any specific head on top. */ export class MobileNetV1Model extends MobileNetV1PreTrainedModel { } /** * MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), * e.g. for ImageNet. */ export class MobileNetV1ForImageClassification extends MobileNetV1PreTrainedModel { /** * @param {any} model_inputs */ async _call(model_inputs) { return new SequenceClassifierOutput(await super._call(model_inputs)); } } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // MobileNetV2 models export class MobileNetV2PreTrainedModel extends PreTrainedModel { } /** * The bare MobileNetV2 model outputting raw hidden-states without any specific head on top. */ export class MobileNetV2Model extends MobileNetV2PreTrainedModel { } /** * MobileNetV2 model with an image classification head on top (a linear layer on top of the pooled features), * e.g. for ImageNet. */ export class MobileNetV2ForImageClassification extends MobileNetV2PreTrainedModel { /** * @param {any} model_inputs */ async _call(model_inputs) { return new SequenceClassifierOutput(await super._call(model_inputs)); } } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // MobileNetV3 models export class MobileNetV3PreTrainedModel extends PreTrainedModel { } /** * The bare MobileNetV3 model outputting raw hidden-states without any specific head on top. */ export class MobileNetV3Model extends MobileNetV3PreTrainedModel { } /** * MobileNetV3 model with an image classification head on top (a linear layer on top of the pooled features), * e.g. for ImageNet. */ export class MobileNetV3ForImageClassification extends MobileNetV3PreTrainedModel { /** * @param {any} model_inputs */ async _call(model_inputs) { return new SequenceClassifierOutput(await super._call(model_inputs)); } } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // MobileNetV4 models export class MobileNetV4PreTrainedModel extends PreTrainedModel { } /** * The bare MobileNetV4 model outputting raw hidden-states without any specific head on top. */ export class MobileNetV4Model extends MobileNetV4PreTrainedModel { } /** * MobileNetV4 model with an image classification head on top (a linear layer on top of the pooled features), * e.g. for ImageNet. 
 */
export class MobileNetV4ForImageClassification extends MobileNetV4PreTrainedModel {
    /**
     * @param {any} model_inputs
     */
    async _call(model_inputs) {
        return new SequenceClassifierOutput(await super._call(model_inputs));
    }
}
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// Decision Transformer models
export class DecisionTransformerPreTrainedModel extends PreTrainedModel { }

/**
 * The model builds upon the GPT2 architecture to perform autoregressive prediction of actions in an offline RL setting.
 * Refer to the paper for more details: https://arxiv.org/abs/2106.01345
 */
export class DecisionTransformerModel extends DecisionTransformerPreTrainedModel { }
//////////////////////////////////////////////////

export class MultiModalityPreTrainedModel extends PreTrainedModel { }
export class MultiModalityCausalLM extends MultiModalityPreTrainedModel {
    forward_params = [
        // prepare_inputs_embeds
        'input_ids',
        'pixel_values',
        'images_seq_mask',
        'images_emb_mask',

        // language_model
        'attention_mask',
        'position_ids',
        'past_key_values',
    ];

    /**
     * @param {ConstructorParameters<typeof MultiModalityPreTrainedModel>} args
     */
    constructor(...args) {
        super(...args);

        // State-based approach to switch out which heads to use during generation
        this._generation_mode = 'text';
    }

    async forward(model_inputs) {
        const mode = this._generation_mode ?? 'text';

        // TODO support re-using PKVs for input_ids.dims[1] !== 1
        // if (model_inputs.past_key_values) {
        //     // && model_inputs.input_ids.dims[1] === 1
        // }

        let output_1;
        if (mode === 'text' || !model_inputs.past_key_values) {
            const session = this.sessions['prepare_inputs_embeds'];
            const prep_inputs = pick(model_inputs, session.inputNames);
            output_1 = await sessionRun(session, prep_inputs);
        } else {
            const session = this.sessions['gen_img_embeds'];
            const prep_inputs = pick({
                image_ids: model_inputs.input_ids,
            }, session.inputNames);
            output_1 = await sessionRun(session, prep_inputs);
        }

        const input_2 = { ...model_inputs, ...output_1 }
        const output_2 = await decoderForward(this, input_2);

        // Select the generation head for the current mode ('lm_head' for text, 'gen_head' for images)
        const head_name = mode === 'text' ? 'lm_head' : 'gen_head';
        const head = this.sessions[head_name];
        if (!head) {
            throw new Error(`Unable to find "${head_name}" generation head`);
        }

        const output_3 = await sessionRun(head, pick(output_2, head.inputNames))

        return {
            ...output_1,
            ...output_2,
            ...output_3,
        };
    }

    /**
     * @param {import('./generation/parameters.js').GenerationFunctionParameters} options
     */
    async generate(options) {
        this._generation_mode = 'text';
        return super.generate(options);
    }

    /**
     * @param {import('./generation/parameters.js').GenerationFunctionParameters} options
     */
    async generate_images(options) {
        this._generation_mode = 'image';

        const start_num_tokens = (options.inputs ??
options[this.main_input_name]).dims[1]; const all_tokens = await super.generate(options); const generated_tokens = (/** @type {Tensor} */(all_tokens)).slice(null, [start_num_tokens, null]) const image_decode = this.sessions['image_decode']; const { decoded_image } = await sessionRun(image_decode, { generated_tokens, }); // Equivalent to `np.clip((dec + 1) / 2 * 255, 0, 255)` const clamped = decoded_image .add_(1) .mul_(255 / 2) .clamp_(0, 255) .to('uint8'); // Return as a list of images const images = []; for (const tensor of clamped) { const img = RawImage.fromTensor(tensor); images.push(img); } return images; } } export class MgpstrModelOutput extends ModelOutput { constructor({ char_logits, bpe_logits, wp_logits }) { super(); this.char_logits = char_logits; this.bpe_logits = bpe_logits; this.wp_logits = wp_logits; } get logits() { return [this.char_logits, this.bpe_logits, this.wp_logits]; } } export class MgpstrPreTrainedModel extends PreTrainedModel { } /** * MGP-STR Model transformer with three classification heads on top * (three A^3 modules and three linear layer on top of the transformer encoder output) for scene text recognition (STR). */ export class MgpstrForSceneTextRecognition extends MgpstrPreTrainedModel { /** * @param {any} model_inputs */ async _call(model_inputs) { return new MgpstrModelOutput(await super._call(model_inputs)); } } ////////////////////////////////////////////////// // PatchTST Transformer models export class PatchTSTPreTrainedModel extends PreTrainedModel { } /** * The bare PatchTST Model outputting raw hidden-states without any specific head. */ export class PatchTSTModel extends PatchTSTPreTrainedModel { } /** * The PatchTST for prediction model. */ export class PatchTSTForPrediction extends PatchTSTPreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // PatchTSMixer Transformer models export class PatchTSMixerPreTrainedModel extends PreTrainedModel { } /** * The bare PatchTSMixer Model outputting raw hidden-states without any specific head. */ export class PatchTSMixerModel extends PatchTSMixerPreTrainedModel { } /** * The PatchTSMixer for prediction model. */ export class PatchTSMixerForPrediction extends PatchTSMixerPreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // AutoModels, used to simplify construction of PreTrainedModels // (uses config to instantiate correct class) /** * Base class of all AutoModels. Contains the `from_pretrained` function * which is used to instantiate pretrained models. */ export class PretrainedMixin { /** * Mapping from model type to model class. * @type {Map<string, Object>[]} */ static MODEL_CLASS_MAPPINGS = null; /** * Whether to attempt to instantiate the base class (`PretrainedModel`) if * the model type is not found in the mapping. 
*/ static BASE_IF_FAIL = false; /** @type {typeof PreTrainedModel.from_pretrained} */ static async from_pretrained(pretrained_model_name_or_path, { progress_callback = null, config = null, cache_dir = null, local_files_only = false, revision = 'main', model_file_name = null, subfolder = 'onnx', device = null, dtype = null, use_external_data_format = null, session_options = {}, } = {}) { const options = { progress_callback, config, cache_dir, local_files_only, revision, model_file_name, subfolder, device, dtype, use_external_data_format, session_options, } options.config = await AutoConfig.from_pretrained(pretrained_model_name_or_path, options); if (!this.MODEL_CLASS_MAPPINGS) { throw new Error("`MODEL_CLASS_MAPPINGS` not implemented for this type of `AutoClass`: " + this.name); } for (const MODEL_CLASS_MAPPING of this.MODEL_CLASS_MAPPINGS) { const modelInfo = MODEL_CLASS_MAPPING.get(options.config.model_type); if (!modelInfo) { continue; // Item not found in this mapping } return await modelInfo[1].from_pretrained(pretrained_model_name_or_path, options); } if (this.BASE_IF_FAIL) { console.warn(`Unknown model class "${options.config.model_type}", attempting to construct from base class.`); return await PreTrainedModel.from_pretrained(pretrained_model_name_or_path, options); } else { throw Error(`Unsupported model type: ${options.config.model_type}`) } } } const MODEL_MAPPING_NAMES_ENCODER_ONLY = new Map([ ['bert', ['BertModel', BertModel]], ['modernbert', ['ModernBertModel', ModernBertModel]], ['nomic_bert', ['NomicBertModel', NomicBertModel]], ['roformer', ['RoFormerModel', RoFormerModel]], ['electra', ['ElectraModel', ElectraModel]], ['esm', ['EsmModel', EsmModel]], ['convbert', ['ConvBertModel', ConvBertModel]], ['camembert', ['CamembertModel', CamembertModel]], ['deberta', ['DebertaModel', DebertaModel]], ['deberta-v2', ['DebertaV2Model', DebertaV2Model]], ['mpnet', ['MPNetModel', MPNetModel]], ['albert', ['AlbertModel', AlbertModel]], ['distilbert', ['DistilBertModel', DistilBertModel]], ['roberta', ['RobertaModel', RobertaModel]], ['xlm', ['XLMModel', XLMModel]], ['xlm-roberta', ['XLMRobertaModel', XLMRobertaModel]], ['clap', ['ClapModel', ClapModel]], ['clip', ['CLIPModel', CLIPModel]], ['clipseg', ['CLIPSegModel', CLIPSegModel]], ['chinese_clip', ['ChineseCLIPModel', ChineseCLIPModel]], ['siglip', ['SiglipModel', SiglipModel]], ['jina_clip', ['JinaCLIPModel', JinaCLIPModel]], ['mobilebert', ['MobileBertModel', MobileBertModel]], ['squeezebert', ['SqueezeBertModel', SqueezeBertModel]], ['wav2vec2', ['Wav2Vec2Model', Wav2Vec2Model]], ['wav2vec2-bert', ['Wav2Vec2BertModel', Wav2Vec2BertModel]], ['unispeech', ['UniSpeechModel', UniSpeechModel]], ['unispeech-sat', ['UniSpeechSatModel', UniSpeechSatModel]], ['hubert', ['HubertModel', HubertModel]], ['wavlm', ['WavLMModel', WavLMModel]], ['audio-spectrogram-transformer', ['ASTModel', ASTModel]], ['vits', ['VitsModel', VitsModel]], ['pyannote', ['PyAnnoteModel', PyAnnoteModel]], ['wespeaker-resnet', ['WeSpeakerResNetModel', WeSpeakerResNetModel]], ['detr', ['DetrModel', DetrModel]], ['rt_detr', ['RTDetrModel', RTDetrModel]], ['table-transformer', ['TableTransformerModel', TableTransformerModel]], ['vit', ['ViTModel', ViTModel]], ['ijepa', ['IJepaModel', IJepaModel]], ['pvt', ['PvtModel', PvtModel]], ['vit_msn', ['ViTMSNModel', ViTMSNModel]], ['vit_mae', ['ViTMAEModel', ViTMAEModel]], ['groupvit', ['GroupViTModel', GroupViTModel]], ['fastvit', ['FastViTModel', FastViTModel]], ['mobilevit', ['MobileViTModel', MobileViTModel]], ['mobilevitv2', 
['MobileViTV2Model', MobileViTV2Model]], ['owlvit', ['OwlViTModel', OwlViTModel]], ['owlv2', ['Owlv2Model', Owlv2Model]], ['beit', ['BeitModel', BeitModel]], ['deit', ['DeiTModel', DeiTModel]], ['hiera', ['HieraModel', HieraModel]], ['convnext', ['ConvNextModel', ConvNextModel]], ['convnextv2', ['ConvNextV2Model', ConvNextV2Model]], ['dinov2', ['Dinov2Model', Dinov2Model]], ['dinov2_with_registers', ['Dinov2WithRegistersModel', Dinov2WithRegistersModel]], ['resnet', ['ResNetModel', ResNetModel]], ['swin', ['SwinModel', SwinModel]], ['swin2sr', ['Swin2SRModel', Swin2SRModel]], ['donut-swin', ['DonutSwinModel', DonutSwinModel]], ['yolos', ['YolosModel', YolosModel]], ['dpt', ['DPTModel', DPTModel]], ['glpn', ['GLPNModel', GLPNModel]], ['hifigan', ['SpeechT5HifiGan', SpeechT5HifiGan]], ['efficientnet', ['EfficientNetModel', EfficientNetModel]], ['decision_transformer', ['DecisionTransformerModel', DecisionTransformerModel]], ['patchtst', ['PatchTSTForPrediction', PatchTSTModel]], ['patchtsmixer', ['PatchTSMixerForPrediction', PatchTSMixerModel]], ['mobilenet_v1', ['MobileNetV1Model', MobileNetV1Model]], ['mobilenet_v2', ['MobileNetV2Model', MobileNetV2Model]], ['mobilenet_v3', ['MobileNetV3Model', MobileNetV3Model]], ['mobilenet_v4', ['MobileNetV4Model', MobileNetV4Model]], ['maskformer', ['MaskFormerModel', MaskFormerModel]], ['mgp-str', ['MgpstrForSceneTextRecognition', MgpstrForSceneTextRecognition]], ['style_text_to_speech_2', ['StyleTextToSpeech2Model', StyleTextToSpeech2Model]], ]); const MODEL_MAPPING_NAMES_ENCODER_DECODER = new Map([ ['t5', ['T5Model', T5Model]], ['longt5', ['LongT5Model', LongT5Model]], ['mt5', ['MT5Model', MT5Model]], ['bart', ['BartModel', BartModel]], ['mbart', ['MBartModel', MBartModel]], ['marian', ['MarianModel', MarianModel]], ['whisper', ['WhisperModel', WhisperModel]], ['m2m_100', ['M2M100Model', M2M100Model]], ['blenderbot', ['BlenderbotModel', BlenderbotModel]], ['blenderbot-small', ['BlenderbotSmallModel', BlenderbotSmallModel]], ]); const MODEL_MAPPING_NAMES_DECODER_ONLY = new Map([ ['bloom', ['BloomModel', BloomModel]], ['jais', ['JAISModel', JAISModel]], ['gpt2', ['GPT2Model', GPT2Model]], ['gptj', ['GPTJModel', GPTJModel]], ['gpt_bigcode', ['GPTBigCodeModel', GPTBigCodeModel]], ['gpt_neo', ['GPTNeoModel', GPTNeoModel]], ['gpt_neox', ['GPTNeoXModel', GPTNeoXModel]], ['codegen', ['CodeGenModel', CodeGenModel]], ['llama', ['LlamaModel', LlamaModel]], ['exaone', ['ExaoneModel', ExaoneModel]], ['olmo', ['OlmoModel', OlmoModel]], ['olmo2', ['Olmo2Model', Olmo2Model]], ['mobilellm', ['MobileLLMModel', MobileLLMModel]], ['granite', ['GraniteModel', GraniteModel]], ['cohere', ['CohereModel', CohereModel]], ['gemma', ['GemmaModel', GemmaModel]], ['gemma2', ['Gemma2Model', Gemma2Model]], ['helium', ['HeliumModel', HeliumModel]], ['glm', ['GlmModel', GlmModel]], ['openelm', ['OpenELMModel', OpenELMModel]], ['qwen2', ['Qwen2Model', Qwen2Model]], ['phi', ['PhiModel', PhiModel]], ['phi3', ['Phi3Model', Phi3Model]], ['mpt', ['MptModel', MptModel]], ['opt', ['OPTModel', OPTModel]], ['mistral', ['MistralModel', MistralModel]], ['starcoder2', ['Starcoder2Model', Starcoder2Model]], ['falcon', ['FalconModel', FalconModel]], ['stablelm', ['StableLmModel', StableLmModel]], ]); const MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = new Map([ ['speecht5', ['SpeechT5ForSpeechToText', SpeechT5ForSpeechToText]], ['whisper', ['WhisperForConditionalGeneration', WhisperForConditionalGeneration]], ['moonshine', ['MoonshineForConditionalGeneration', MoonshineForConditionalGeneration]], ]); 
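// For reference, each entry in the mapping tables above and below pairs a `config.model_type`
// string with a `[class name, class]` tuple, which `PretrainedMixin.from_pretrained` uses to
// pick the concrete class to instantiate. A small illustrative lookup (shown only as a sketch,
// not additional library API):
//
//     const entry = MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES.get('whisper');
//     // entry[0] === 'WhisperForConditionalGeneration' (the class name)
//     // entry[1] is the class itself, whose `.from_pretrained(...)` is then called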
const MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING_NAMES = new Map([ ['speecht5', ['SpeechT5ForTextToSpeech', SpeechT5ForTextToSpeech]], ]); const MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING_NAMES = new Map([ ['vits', ['VitsModel', VitsModel]], ['musicgen', ['MusicgenForConditionalGeneration', MusicgenForConditionalGeneration]], ]); const MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = new Map([ ['bert', ['BertForSequenceClassification', BertForSequenceClassification]], ['modernbert', ['ModernBertForSequenceClassification', ModernBertForSequenceClassification]], ['roformer', ['RoFormerForSequenceClassification', RoFormerForSequenceClassification]], ['electra', ['ElectraForSequenceClassification', ElectraForSequenceClassification]], ['esm', ['EsmForSequenceClassification', EsmForSequenceClassification]], ['convbert', ['ConvBertForSequenceClassification', ConvBertForSequenceClassification]], ['camembert', ['CamembertForSequenceClassification', CamembertForSequenceClassification]], ['deberta', ['DebertaForSequenceClassification', DebertaForSequenceClassification]], ['deberta-v2', ['DebertaV2ForSequenceClassification', DebertaV2ForSequenceClassification]], ['mpnet', ['MPNetForSequenceClassification', MPNetForSequenceClassification]], ['albert', ['AlbertForSequenceClassification', AlbertForSequenceClassification]], ['distilbert', ['DistilBertForSequenceClassification', DistilBertForSequenceClassification]], ['roberta', ['RobertaForSequenceClassification', RobertaForSequenceClassification]], ['xlm', ['XLMForSequenceClassification', XLMForSequenceClassification]], ['xlm-roberta', ['XLMRobertaForSequenceClassification', XLMRobertaForSequenceClassification]], ['bart', ['BartForSequenceClassification', BartForSequenceClassification]], ['mbart', ['MBartForSequenceClassification', MBartForSequenceClassification]], ['mobilebert', ['MobileBertForSequenceClassification', MobileBertForSequenceClassification]], ['squeezebert', ['SqueezeBertForSequenceClassification', SqueezeBertForSequenceClassification]], ]); const MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = new Map([ ['bert', ['BertForTokenClassification', BertForTokenClassification]], ['modernbert', ['ModernBertForTokenClassification', ModernBertForTokenClassification]], ['roformer', ['RoFormerForTokenClassification', RoFormerForTokenClassification]], ['electra', ['ElectraForTokenClassification', ElectraForTokenClassification]], ['esm', ['EsmForTokenClassification', EsmForTokenClassification]], ['convbert', ['ConvBertForTokenClassification', ConvBertForTokenClassification]], ['camembert', ['CamembertForTokenClassification', CamembertForTokenClassification]], ['deberta', ['DebertaForTokenClassification', DebertaForTokenClassification]], ['deberta-v2', ['DebertaV2ForTokenClassification', DebertaV2ForTokenClassification]], ['mpnet', ['MPNetForTokenClassification', MPNetForTokenClassification]], ['distilbert', ['DistilBertForTokenClassification', DistilBertForTokenClassification]], ['roberta', ['RobertaForTokenClassification', RobertaForTokenClassification]], ['xlm', ['XLMForTokenClassification', XLMForTokenClassification]], ['xlm-roberta', ['XLMRobertaForTokenClassification', XLMRobertaForTokenClassification]], ]); const MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = new Map([ ['t5', ['T5ForConditionalGeneration', T5ForConditionalGeneration]], ['longt5', ['LongT5ForConditionalGeneration', LongT5ForConditionalGeneration]], ['mt5', ['MT5ForConditionalGeneration', MT5ForConditionalGeneration]], ['bart', ['BartForConditionalGeneration', BartForConditionalGeneration]], 
['mbart', ['MBartForConditionalGeneration', MBartForConditionalGeneration]], ['marian', ['MarianMTModel', MarianMTModel]], ['m2m_100', ['M2M100ForConditionalGeneration', M2M100ForConditionalGeneration]], ['blenderbot', ['BlenderbotForConditionalGeneration', BlenderbotForConditionalGeneration]], ['blenderbot-small', ['BlenderbotSmallForConditionalGeneration', BlenderbotSmallForConditionalGeneration]], ]); const MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = new Map([ ['bloom', ['BloomForCausalLM', BloomForCausalLM]], ['gpt2', ['GPT2LMHeadModel', GPT2LMHeadModel]], ['jais', ['JAISLMHeadModel', JAISLMHeadModel]], ['gptj', ['GPTJForCausalLM', GPTJForCausalLM]], ['gpt_bigcode', ['GPTBigCodeForCausalLM', GPTBigCodeForCausalLM]], ['gpt_neo', ['GPTNeoForCausalLM', GPTNeoForCausalLM]], ['gpt_neox', ['GPTNeoXForCausalLM', GPTNeoXForCausalLM]], ['codegen', ['CodeGenForCausalLM', CodeGenForCausalLM]], ['llama', ['LlamaForCausalLM', LlamaForCausalLM]], ['exaone', ['ExaoneForCausalLM', ExaoneForCausalLM]], ['olmo', ['OlmoForCausalLM', OlmoForCausalLM]], ['olmo2', ['Olmo2ForCausalLM', Olmo2ForCausalLM]], ['mobilellm', ['MobileLLMForCausalLM', MobileLLMForCausalLM]], ['granite', ['GraniteForCausalLM', GraniteForCausalLM]], ['cohere', ['CohereForCausalLM', CohereForCausalLM]], ['gemma', ['GemmaForCausalLM', GemmaForCausalLM]], ['gemma2', ['Gemma2ForCausalLM', Gemma2ForCausalLM]], ['helium', ['HeliumForCausalLM', HeliumForCausalLM]], ['glm', ['GlmForCausalLM', GlmForCausalLM]], ['openelm', ['OpenELMForCausalLM', OpenELMForCausalLM]], ['qwen2', ['Qwen2ForCausalLM', Qwen2ForCausalLM]], ['phi', ['PhiForCausalLM', PhiForCausalLM]], ['phi3', ['Phi3ForCausalLM', Phi3ForCausalLM]], ['mpt', ['MptForCausalLM', MptForCausalLM]], ['opt', ['OPTForCausalLM', OPTForCausalLM]], ['mbart', ['MBartForCausalLM', MBartForCausalLM]], ['mistral', ['MistralForCausalLM', MistralForCausalLM]], ['starcoder2', ['Starcoder2ForCausalLM', Starcoder2ForCausalLM]], ['falcon', ['FalconForCausalLM', FalconForCausalLM]], ['trocr', ['TrOCRForCausalLM', TrOCRForCausalLM]], ['stablelm', ['StableLmForCausalLM', StableLmForCausalLM]], // Also image-text-to-text ['phi3_v', ['Phi3VForCausalLM', Phi3VForCausalLM]], ]); const MODEL_FOR_MULTIMODALITY_MAPPING_NAMES = new Map([ ['multi_modality', ['MultiModalityCausalLM', MultiModalityCausalLM]], ]); const MODEL_FOR_MASKED_LM_MAPPING_NAMES = new Map([ ['bert', ['BertForMaskedLM', BertForMaskedLM]], ['modernbert', ['ModernBertForMaskedLM', ModernBertForMaskedLM]], ['roformer', ['RoFormerForMaskedLM', RoFormerForMaskedLM]], ['electra', ['ElectraForMaskedLM', ElectraForMaskedLM]], ['esm', ['EsmForMaskedLM', EsmForMaskedLM]], ['convbert', ['ConvBertForMaskedLM', ConvBertForMaskedLM]], ['camembert', ['CamembertForMaskedLM', CamembertForMaskedLM]], ['deberta', ['DebertaForMaskedLM', DebertaForMaskedLM]], ['deberta-v2', ['DebertaV2ForMaskedLM', DebertaV2ForMaskedLM]], ['mpnet', ['MPNetForMaskedLM', MPNetForMaskedLM]], ['albert', ['AlbertForMaskedLM', AlbertForMaskedLM]], ['distilbert', ['DistilBertForMaskedLM', DistilBertForMaskedLM]], ['roberta', ['RobertaForMaskedLM', RobertaForMaskedLM]], ['xlm', ['XLMWithLMHeadModel', XLMWithLMHeadModel]], ['xlm-roberta', ['XLMRobertaForMaskedLM', XLMRobertaForMaskedLM]], ['mobilebert', ['MobileBertForMaskedLM', MobileBertForMaskedLM]], ['squeezebert', ['SqueezeBertForMaskedLM', SqueezeBertForMaskedLM]], ]); const MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = new Map([ ['bert', ['BertForQuestionAnswering', BertForQuestionAnswering]], ['roformer', ['RoFormerForQuestionAnswering', 
RoFormerForQuestionAnswering]], ['electra', ['ElectraForQuestionAnswering', ElectraForQuestionAnswering]], ['convbert', ['ConvBertForQuestionAnswering', ConvBertForQuestionAnswering]], ['camembert', ['CamembertForQuestionAnswering', CamembertForQuestionAnswering]], ['deberta', ['DebertaForQuestionAnswering', DebertaForQuestionAnswering]], ['deberta-v2', ['DebertaV2ForQuestionAnswering', DebertaV2ForQuestionAnswering]], ['mpnet', ['MPNetForQuestionAnswering', MPNetForQuestionAnswering]], ['albert', ['AlbertForQuestionAnswering', AlbertForQuestionAnswering]], ['distilbert', ['DistilBertForQuestionAnswering', DistilBertForQuestionAnswering]], ['roberta', ['RobertaForQuestionAnswering', RobertaForQuestionAnswering]], ['xlm', ['XLMForQuestionAnswering', XLMForQuestionAnswering]], ['xlm-roberta', ['XLMRobertaForQuestionAnswering', XLMRobertaForQuestionAnswering]], ['mobilebert', ['MobileBertForQuestionAnswering', MobileBertForQuestionAnswering]], ['squeezebert', ['SqueezeBertForQuestionAnswering', SqueezeBertForQuestionAnswering]], ]); const MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = new Map([ ['vision-encoder-decoder', ['VisionEncoderDecoderModel', VisionEncoderDecoderModel]], ['idefics3', ['Idefics3ForConditionalGeneration', Idefics3ForConditionalGeneration]], ]); const MODEL_FOR_IMAGE_TEXT_TO_TEXT_MAPPING_NAMES = new Map([ ['llava', ['LlavaForConditionalGeneration', LlavaForConditionalGeneration]], ['llava_onevision', ['LlavaOnevisionForConditionalGeneration', LlavaOnevisionForConditionalGeneration]], ['moondream1', ['Moondream1ForConditionalGeneration', Moondream1ForConditionalGeneration]], ['florence2', ['Florence2ForConditionalGeneration', Florence2ForConditionalGeneration]], ['qwen2-vl', ['Qwen2VLForConditionalGeneration', Qwen2VLForConditionalGeneration]], ['idefics3', ['Idefics3ForConditionalGeneration', Idefics3ForConditionalGeneration]], ['paligemma', ['PaliGemmaForConditionalGeneration', PaliGemmaForConditionalGeneration]], ]); const MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES = new Map([ ['vision-encoder-decoder', ['VisionEncoderDecoderModel', VisionEncoderDecoderModel]], ]); const MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = new Map([ ['vit', ['ViTForImageClassification', ViTForImageClassification]], ['ijepa', ['IJepaForImageClassification', IJepaForImageClassification]], ['pvt', ['PvtForImageClassification', PvtForImageClassification]], ['vit_msn', ['ViTMSNForImageClassification', ViTMSNForImageClassification]], ['fastvit', ['FastViTForImageClassification', FastViTForImageClassification]], ['mobilevit', ['MobileViTForImageClassification', MobileViTForImageClassification]], ['mobilevitv2', ['MobileViTV2ForImageClassification', MobileViTV2ForImageClassification]], ['beit', ['BeitForImageClassification', BeitForImageClassification]], ['deit', ['DeiTForImageClassification', DeiTForImageClassification]], ['hiera', ['HieraForImageClassification', HieraForImageClassification]], ['convnext', ['ConvNextForImageClassification', ConvNextForImageClassification]], ['convnextv2', ['ConvNextV2ForImageClassification', ConvNextV2ForImageClassification]], ['dinov2', ['Dinov2ForImageClassification', Dinov2ForImageClassification]], ['dinov2_with_registers', ['Dinov2WithRegistersForImageClassification', Dinov2WithRegistersForImageClassification]], ['resnet', ['ResNetForImageClassification', ResNetForImageClassification]], ['swin', ['SwinForImageClassification', SwinForImageClassification]], ['segformer', ['SegformerForImageClassification', SegformerForImageClassification]], ['efficientnet', 
['EfficientNetForImageClassification', EfficientNetForImageClassification]], ['mobilenet_v1', ['MobileNetV1ForImageClassification', MobileNetV1ForImageClassification]], ['mobilenet_v2', ['MobileNetV2ForImageClassification', MobileNetV2ForImageClassification]], ['mobilenet_v3', ['MobileNetV3ForImageClassification', MobileNetV3ForImageClassification]], ['mobilenet_v4', ['MobileNetV4ForImageClassification', MobileNetV4ForImageClassification]], ]); const MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES = new Map([ ['detr', ['DetrForObjectDetection', DetrForObjectDetection]], ['rt_detr', ['RTDetrForObjectDetection', RTDetrForObjectDetection]], ['table-transformer', ['TableTransformerForObjectDetection', TableTransformerForObjectDetection]], ['yolos', ['YolosForObjectDetection', YolosForObjectDetection]], ]); const MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES = new Map([ ['owlvit', ['OwlViTForObjectDetection', OwlViTForObjectDetection]], ['owlv2', ['Owlv2ForObjectDetection', Owlv2ForObjectDetection]], ['grounding-dino', ['GroundingDinoForObjectDetection', GroundingDinoForObjectDetection]], ]); const MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES = new Map([ // TODO: Do not add new models here ['detr', ['DetrForSegmentation', DetrForSegmentation]], ['clipseg', ['CLIPSegForImageSegmentation', CLIPSegForImageSegmentation]], ]); const MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES = new Map([ ['segformer', ['SegformerForSemanticSegmentation', SegformerForSemanticSegmentation]], ['sapiens', ['SapiensForSemanticSegmentation', SapiensForSemanticSegmentation]], ]); const MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING_NAMES = new Map([ ['detr', ['DetrForSegmentation', DetrForSegmentation]], ['maskformer', ['MaskFormerForInstanceSegmentation', MaskFormerForInstanceSegmentation]], ]); const MODEL_FOR_MASK_GENERATION_MAPPING_NAMES = new Map([ ['sam', ['SamModel', SamModel]], ]); const MODEL_FOR_CTC_MAPPING_NAMES = new Map([ ['wav2vec2', ['Wav2Vec2ForCTC', Wav2Vec2ForCTC]], ['wav2vec2-bert', ['Wav2Vec2BertForCTC', Wav2Vec2BertForCTC]], ['unispeech', ['UniSpeechForCTC', UniSpeechForCTC]], ['unispeech-sat', ['UniSpeechSatForCTC', UniSpeechSatForCTC]], ['wavlm', ['WavLMForCTC', WavLMForCTC]], ['hubert', ['HubertForCTC', HubertForCTC]], ]); const MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = new Map([ ['wav2vec2', ['Wav2Vec2ForSequenceClassification', Wav2Vec2ForSequenceClassification]], ['wav2vec2-bert', ['Wav2Vec2BertForSequenceClassification', Wav2Vec2BertForSequenceClassification]], ['unispeech', ['UniSpeechForSequenceClassification', UniSpeechForSequenceClassification]], ['unispeech-sat', ['UniSpeechSatForSequenceClassification', UniSpeechSatForSequenceClassification]], ['wavlm', ['WavLMForSequenceClassification', WavLMForSequenceClassification]], ['hubert', ['HubertForSequenceClassification', HubertForSequenceClassification]], ['audio-spectrogram-transformer', ['ASTForAudioClassification', ASTForAudioClassification]], ]); const MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES = new Map([ ['wavlm', ['WavLMForXVector', WavLMForXVector]], ]); const MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES = new Map([ ['unispeech-sat', ['UniSpeechSatForAudioFrameClassification', UniSpeechSatForAudioFrameClassification]], ['wavlm', ['WavLMForAudioFrameClassification', WavLMForAudioFrameClassification]], ['wav2vec2', ['Wav2Vec2ForAudioFrameClassification', Wav2Vec2ForAudioFrameClassification]], ['pyannote', ['PyAnnoteForAudioFrameClassification', PyAnnoteForAudioFrameClassification]], ]); const MODEL_FOR_IMAGE_MATTING_MAPPING_NAMES = new 
Map([ ['vitmatte', ['VitMatteForImageMatting', VitMatteForImageMatting]], ]); const MODEL_FOR_TIME_SERIES_PREDICTION_MAPPING_NAMES = new Map([ ['patchtst', ['PatchTSTForPrediction', PatchTSTForPrediction]], ['patchtsmixer', ['PatchTSMixerForPrediction', PatchTSMixerForPrediction]], ]) const MODEL_FOR_IMAGE_TO_IMAGE_MAPPING_NAMES = new Map([ ['swin2sr', ['Swin2SRForImageSuperResolution', Swin2SRForImageSuperResolution]], ]) const MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES = new Map([ ['dpt', ['DPTForDepthEstimation', DPTForDepthEstimation]], ['depth_anything', ['DepthAnythingForDepthEstimation', DepthAnythingForDepthEstimation]], ['glpn', ['GLPNForDepthEstimation', GLPNForDepthEstimation]], ['sapiens', ['SapiensForDepthEstimation', SapiensForDepthEstimation]], ['depth_pro', ['DepthProForDepthEstimation', DepthProForDepthEstimation]], ]) const MODEL_FOR_NORMAL_ESTIMATION_MAPPING_NAMES = new Map([ ['sapiens', ['SapiensForNormalEstimation', SapiensForNormalEstimation]], ]) const MODEL_FOR_POSE_ESTIMATION_MAPPING_NAMES = new Map([ ['vitpose', ['VitPoseForPoseEstimation', VitPoseForPoseEstimation]], ]) // NOTE: This is custom to Transformers.js, and is necessary because certain models // (e.g., CLIP) are split into vision and text components const MODEL_FOR_IMAGE_FEATURE_EXTRACTION_MAPPING_NAMES = new Map([ ['clip', ['CLIPVisionModelWithProjection', CLIPVisionModelWithProjection]], ['siglip', ['SiglipVisionModel', SiglipVisionModel]], ['jina_clip', ['JinaCLIPVisionModel', JinaCLIPVisionModel]], ]) const MODEL_CLASS_TYPE_MAPPING = [ [MODEL_MAPPING_NAMES_ENCODER_ONLY, MODEL_TYPES.EncoderOnly], [MODEL_MAPPING_NAMES_ENCODER_DECODER, MODEL_TYPES.EncoderDecoder], [MODEL_MAPPING_NAMES_DECODER_ONLY, MODEL_TYPES.DecoderOnly], [MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, MODEL_TYPES.EncoderOnly], [MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, MODEL_TYPES.EncoderOnly], [MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, MODEL_TYPES.Seq2Seq], [MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES, MODEL_TYPES.Seq2Seq], [MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, MODEL_TYPES.DecoderOnly], [MODEL_FOR_MULTIMODALITY_MAPPING_NAMES, MODEL_TYPES.MultiModality], [MODEL_FOR_MASKED_LM_MAPPING_NAMES, MODEL_TYPES.EncoderOnly], [MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_TYPES.EncoderOnly], [MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES, MODEL_TYPES.Vision2Seq], [MODEL_FOR_IMAGE_TEXT_TO_TEXT_MAPPING_NAMES, MODEL_TYPES.ImageTextToText], [MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, MODEL_TYPES.EncoderOnly], [MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES, MODEL_TYPES.EncoderOnly], [MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING_NAMES, MODEL_TYPES.EncoderOnly], [MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, MODEL_TYPES.EncoderOnly], [MODEL_FOR_IMAGE_MATTING_MAPPING_NAMES, MODEL_TYPES.EncoderOnly], [MODEL_FOR_TIME_SERIES_PREDICTION_MAPPING_NAMES, MODEL_TYPES.EncoderOnly], [MODEL_FOR_IMAGE_TO_IMAGE_MAPPING_NAMES, MODEL_TYPES.EncoderOnly], [MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES, MODEL_TYPES.EncoderOnly], [MODEL_FOR_NORMAL_ESTIMATION_MAPPING_NAMES, MODEL_TYPES.EncoderOnly], [MODEL_FOR_POSE_ESTIMATION_MAPPING_NAMES, MODEL_TYPES.EncoderOnly], [MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES, MODEL_TYPES.EncoderOnly], [MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES, MODEL_TYPES.EncoderOnly], [MODEL_FOR_MASK_GENERATION_MAPPING_NAMES, MODEL_TYPES.MaskGeneration], [MODEL_FOR_CTC_MAPPING_NAMES, MODEL_TYPES.EncoderOnly], [MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, MODEL_TYPES.EncoderOnly], [MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING_NAMES, MODEL_TYPES.Seq2Seq], 
[MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING_NAMES, MODEL_TYPES.EncoderOnly], [MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES, MODEL_TYPES.EncoderOnly], [MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES, MODEL_TYPES.EncoderOnly], // Custom: [MODEL_FOR_IMAGE_FEATURE_EXTRACTION_MAPPING_NAMES, MODEL_TYPES.EncoderOnly], ]; for (const [mappings, type] of MODEL_CLASS_TYPE_MAPPING) { // @ts-ignore for (const [name, model] of mappings.values()) { MODEL_TYPE_MAPPING.set(name, type); MODEL_CLASS_TO_NAME_MAPPING.set(model, name); MODEL_NAME_TO_CLASS_MAPPING.set(name, model); } } const CUSTOM_MAPPING = [ // OVERRIDE: // TODO: Refactor to allow class to specify model ['MusicgenForConditionalGeneration', MusicgenForConditionalGeneration, MODEL_TYPES.Musicgen], ['Phi3VForCausalLM', Phi3VForCausalLM, MODEL_TYPES.Phi3V], ['CLIPTextModelWithProjection', CLIPTextModelWithProjection, MODEL_TYPES.EncoderOnly], ['SiglipTextModel', SiglipTextModel, MODEL_TYPES.EncoderOnly], ['JinaCLIPTextModel', JinaCLIPTextModel, MODEL_TYPES.EncoderOnly], ['ClapTextModelWithProjection', ClapTextModelWithProjection, MODEL_TYPES.EncoderOnly], ['ClapAudioModelWithProjection', ClapAudioModelWithProjection, MODEL_TYPES.EncoderOnly], ] for (const [name, model, type] of CUSTOM_MAPPING) { MODEL_TYPE_MAPPING.set(name, type); MODEL_CLASS_TO_NAME_MAPPING.set(model, name); MODEL_NAME_TO_CLASS_MAPPING.set(name, model); } /** * Helper class which is used to instantiate pretrained models with the `from_pretrained` function. * The chosen model class is determined by the type specified in the model config. * * @example * let model = await AutoModel.from_pretrained('Xenova/bert-base-uncased'); */ export class AutoModel extends PretrainedMixin { /** @type {Map<string, Object>[]} */ // @ts-ignore static MODEL_CLASS_MAPPINGS = MODEL_CLASS_TYPE_MAPPING.map(x => x[0]); static BASE_IF_FAIL = true; } /** * Helper class which is used to instantiate pretrained sequence classification models with the `from_pretrained` function. * The chosen model class is determined by the type specified in the model config. * * @example * let model = await AutoModelForSequenceClassification.from_pretrained('Xenova/distilbert-base-uncased-finetuned-sst-2-english'); */ export class AutoModelForSequenceClassification extends PretrainedMixin { static MODEL_CLASS_MAPPINGS = [MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES]; } /** * Helper class which is used to instantiate pretrained token classification models with the `from_pretrained` function. * The chosen model class is determined by the type specified in the model config. * * @example * let model = await AutoModelForTokenClassification.from_pretrained('Xenova/distilbert-base-multilingual-cased-ner-hrl'); */ export class AutoModelForTokenClassification extends PretrainedMixin { static MODEL_CLASS_MAPPINGS = [MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES]; } /** * Helper class which is used to instantiate pretrained sequence-to-sequence models with the `from_pretrained` function. * The chosen model class is determined by the type specified in the model config. * * @example * let model = await AutoModelForSeq2SeqLM.from_pretrained('Xenova/t5-small'); */ export class AutoModelForSeq2SeqLM extends PretrainedMixin { static MODEL_CLASS_MAPPINGS = [MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES]; } /** * Helper class which is used to instantiate pretrained sequence-to-sequence speech-to-text models with the `from_pretrained` function. * The chosen model class is determined by the type specified in the model config. 
 *
 * @example
 * let model = await AutoModelForSpeechSeq2Seq.from_pretrained('openai/whisper-tiny.en');
 */
export class AutoModelForSpeechSeq2Seq extends PretrainedMixin {
    static MODEL_CLASS_MAPPINGS = [MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES];
}

/**
 * Helper class which is used to instantiate pretrained sequence-to-sequence text-to-spectrogram models with the `from_pretrained` function.
 * The chosen model class is determined by the type specified in the model config.
 *
 * @example
 * let model = await AutoModelForTextToSpectrogram.from_pretrained('microsoft/speecht5_tts');
 */
export class AutoModelForTextToSpectrogram extends PretrainedMixin {
    static MODEL_CLASS_MAPPINGS = [MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING_NAMES];
}

/**
 * Helper class which is used to instantiate pretrained text-to-waveform models with the `from_pretrained` function.
 * The chosen model class is determined by the type specified in the model config.
 *
 * @example
 * let model = await AutoModelForTextToWaveform.from_pretrained('facebook/mms-tts-eng');
 */
export class AutoModelForTextToWaveform extends PretrainedMixin {
    static MODEL_CLASS_MAPPINGS = [MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING_NAMES];
}

/**
 * Helper class which is used to instantiate pretrained causal language models with the `from_pretrained` function.
 * The chosen model class is determined by the type specified in the model config.
 *
 * @example
 * let model = await AutoModelForCausalLM.from_pretrained('Xenova/gpt2');
 */
export class AutoModelForCausalLM extends PretrainedMixin {
    static MODEL_CLASS_MAPPINGS = [MODEL_FOR_CAUSAL_LM_MAPPING_NAMES];
}

/**
 * Helper class which is used to instantiate pretrained masked language models with the `from_pretrained` function.
 * The chosen model class is determined by the type specified in the model config.
 *
 * @example
 * let model = await AutoModelForMaskedLM.from_pretrained('Xenova/bert-base-uncased');
 */
export class AutoModelForMaskedLM extends PretrainedMixin {
    static MODEL_CLASS_MAPPINGS = [MODEL_FOR_MASKED_LM_MAPPING_NAMES];
}

/**
 * Helper class which is used to instantiate pretrained question answering models with the `from_pretrained` function.
 * The chosen model class is determined by the type specified in the model config.
 *
 * @example
 * let model = await AutoModelForQuestionAnswering.from_pretrained('Xenova/distilbert-base-cased-distilled-squad');
 */
export class AutoModelForQuestionAnswering extends PretrainedMixin {
    static MODEL_CLASS_MAPPINGS = [MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES];
}

/**
 * Helper class which is used to instantiate pretrained vision-to-sequence models with the `from_pretrained` function.
 * The chosen model class is determined by the type specified in the model config.
 *
 * @example
 * let model = await AutoModelForVision2Seq.from_pretrained('Xenova/vit-gpt2-image-captioning');
 */
export class AutoModelForVision2Seq extends PretrainedMixin {
    static MODEL_CLASS_MAPPINGS = [MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES];
}

/**
 * Helper class which is used to instantiate pretrained image classification models with the `from_pretrained` function.
 * The chosen model class is determined by the type specified in the model config.
* * @example * let model = await AutoModelForImageClassification.from_pretrained('Xenova/vit-base-patch16-224'); */ export class AutoModelForImageClassification extends PretrainedMixin { static MODEL_CLASS_MAPPINGS = [MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES]; } /** * Helper class which is used to instantiate pretrained image segmentation models with the `from_pretrained` function. * The chosen model class is determined by the type specified in the model config. * * @example * let model = await AutoModelForImageSegmentation.from_pretrained('Xenova/detr-resnet-50-panoptic'); */ export class AutoModelForImageSegmentation extends PretrainedMixin { static MODEL_CLASS_MAPPINGS = [MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES]; } /** * Helper class which is used to instantiate pretrained image segmentation models with the `from_pretrained` function. * The chosen model class is determined by the type specified in the model config. * * @example * let model = await AutoModelForSemanticSegmentation.from_pretrained('nvidia/segformer-b3-finetuned-cityscapes-1024-1024'); */ export class AutoModelForSemanticSegmentation extends PretrainedMixin { static MODEL_CLASS_MAPPINGS = [MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES]; } /** * Helper class which is used to instantiate pretrained universal image segmentation models with the `from_pretrained` function. * The chosen model class is determined by the type specified in the model config. * * @example * let model = await AutoModelForUniversalSegmentation.from_pretrained('hf-internal-testing/tiny-random-MaskFormerForInstanceSegmentation'); */ export class AutoModelForUniversalSegmentation extends PretrainedMixin { static MODEL_CLASS_MAPPINGS = [MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING_NAMES]; } /** * Helper class which is used to instantiate pretrained object detection models with the `from_pretrained` function. * The chosen model class is determined by the type specified in the model config. * * @example * let model = await AutoModelForObjectDetection.from_pretrained('Xenova/detr-resnet-50'); */ export class AutoModelForObjectDetection extends PretrainedMixin { static MODEL_CLASS_MAPPINGS = [MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES]; } export class AutoModelForZeroShotObjectDetection extends PretrainedMixin { static MODEL_CLASS_MAPPINGS = [MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES]; } /** * Helper class which is used to instantiate pretrained mask generation models with the `from_pretrained` function. * The chosen model class is determined by the type specified in the model config. 
* * @example * let model = await AutoModelForMaskGeneration.from_pretrained('Xenova/sam-vit-base'); */ export class AutoModelForMaskGeneration extends PretrainedMixin { static MODEL_CLASS_MAPPINGS = [MODEL_FOR_MASK_GENERATION_MAPPING_NAMES]; } export class AutoModelForCTC extends PretrainedMixin { static MODEL_CLASS_MAPPINGS = [MODEL_FOR_CTC_MAPPING_NAMES]; } export class AutoModelForAudioClassification extends PretrainedMixin { static MODEL_CLASS_MAPPINGS = [MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES]; } export class AutoModelForXVector extends PretrainedMixin { static MODEL_CLASS_MAPPINGS = [MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES]; } export class AutoModelForAudioFrameClassification extends PretrainedMixin { static MODEL_CLASS_MAPPINGS = [MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES]; } export class AutoModelForDocumentQuestionAnswering extends PretrainedMixin { static MODEL_CLASS_MAPPINGS = [MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES]; } export class AutoModelForImageMatting extends PretrainedMixin { static MODEL_CLASS_MAPPINGS = [MODEL_FOR_IMAGE_MATTING_MAPPING_NAMES]; } export class AutoModelForImageToImage extends PretrainedMixin { static MODEL_CLASS_MAPPINGS = [MODEL_FOR_IMAGE_TO_IMAGE_MAPPING_NAMES]; } export class AutoModelForDepthEstimation extends PretrainedMixin { static MODEL_CLASS_MAPPINGS = [MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES]; } export class AutoModelForNormalEstimation extends PretrainedMixin { static MODEL_CLASS_MAPPINGS = [MODEL_FOR_NORMAL_ESTIMATION_MAPPING_NAMES]; } export class AutoModelForPoseEstimation extends PretrainedMixin { static MODEL_CLASS_MAPPINGS = [MODEL_FOR_POSE_ESTIMATION_MAPPING_NAMES]; } export class AutoModelForImageFeatureExtraction extends PretrainedMixin { static MODEL_CLASS_MAPPINGS = [MODEL_FOR_IMAGE_FEATURE_EXTRACTION_MAPPING_NAMES]; } ////////////////////////////////////////////////// ////////////////////////////////////////////////// export class Seq2SeqLMOutput extends ModelOutput { /** * @param {Object} output The output of the model. * @param {Tensor} output.logits The output logits of the model. * @param {Tensor} output.past_key_values An tensor of key/value pairs that represent the previous state of the model. * @param {Tensor} output.encoder_outputs The output of the encoder in a sequence-to-sequence model. * @param {Tensor} [output.decoder_attentions] Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. * @param {Tensor} [output.cross_attentions] Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. */ constructor({ logits, past_key_values, encoder_outputs, decoder_attentions = null, cross_attentions = null }) { super(); this.logits = logits; this.past_key_values = past_key_values; this.encoder_outputs = encoder_outputs; this.decoder_attentions = decoder_attentions; this.cross_attentions = cross_attentions; } } /** * Base class for outputs of sentence classification models. */ export class SequenceClassifierOutput extends ModelOutput { /** * @param {Object} output The output of the model. * @param {Tensor} output.logits classification (or regression if config.num_labels==1) scores (before SoftMax). * @param {Record<string, Tensor>} [output.attentions] Object of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. 
* Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. */ constructor({ logits, ...attentions }) { super(); this.logits = logits; const attentions_list = Object.values(attentions); if (attentions_list.length > 0) { // Only set attentions if they are not empty this.attentions = attentions_list; } } } /** * Base class for outputs of XVector models. */ export class XVectorOutput extends ModelOutput { /** * @param {Object} output The output of the model. * @param {Tensor} output.logits Classification hidden states before AMSoftmax, of shape `(batch_size, config.xvector_output_dim)`. * @param {Tensor} output.embeddings Utterance embeddings used for vector similarity-based retrieval, of shape `(batch_size, config.xvector_output_dim)`. */ constructor({ logits, embeddings }) { super(); this.logits = logits; this.embeddings = embeddings; } } /** * Base class for outputs of token classification models. */ export class TokenClassifierOutput extends ModelOutput { /** * @param {Object} output The output of the model. * @param {Tensor} output.logits Classification scores (before SoftMax). */ constructor({ logits }) { super(); this.logits = logits; } } /** * Base class for masked language models outputs. */ export class MaskedLMOutput extends ModelOutput { /** * @param {Object} output The output of the model. * @param {Tensor} output.logits Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). */ constructor({ logits }) { super(); this.logits = logits; } } /** * Base class for outputs of question answering models. */ export class QuestionAnsweringModelOutput extends ModelOutput { /** * @param {Object} output The output of the model. * @param {Tensor} output.start_logits Span-start scores (before SoftMax). * @param {Tensor} output.end_logits Span-end scores (before SoftMax). */ constructor({ start_logits, end_logits }) { super(); this.start_logits = start_logits; this.end_logits = end_logits; } } /** * Base class for causal language model (or autoregressive) outputs. */ export class CausalLMOutput extends ModelOutput { /** * @param {Object} output The output of the model. * @param {Tensor} output.logits Prediction scores of the language modeling head (scores for each vocabulary token before softmax). */ constructor({ logits }) { super(); this.logits = logits; } } /** * Base class for causal language model (or autoregressive) outputs. */ export class CausalLMOutputWithPast extends ModelOutput { /** * @param {Object} output The output of the model. * @param {Tensor} output.logits Prediction scores of the language modeling head (scores for each vocabulary token before softmax). * @param {Tensor} output.past_key_values Contains pre-computed hidden-states (key and values in the self-attention blocks) * that can be used (see `past_key_values` input) to speed up sequential decoding. */ constructor({ logits, past_key_values }) { super(); this.logits = logits; this.past_key_values = past_key_values; } } export class ImageMattingOutput extends ModelOutput { /** * @param {Object} output The output of the model. * @param {Tensor} output.alphas Estimated alpha values, of shape `(batch_size, num_channels, height, width)`. */ constructor({ alphas }) { super(); this.alphas = alphas; } } /** * Describes the outputs for the VITS model. */ export class VitsModelOutput extends ModelOutput { /** * @param {Object} output The output of the model. 
* @param {Tensor} output.waveform The final audio waveform predicted by the model, of shape `(batch_size, sequence_length)`. * @param {Tensor} output.spectrogram The log-mel spectrogram predicted at the output of the flow model. * This spectrogram is passed to the Hi-Fi GAN decoder model to obtain the final audio waveform. */ constructor({ waveform, spectrogram }) { super(); this.waveform = waveform; this.spectrogram = spectrogram; } }
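/**
 * **Example:** Reading a `SequenceClassifierOutput` returned by one of the `AutoModel` helper classes above.
 * This is an illustrative sketch rather than part of the library itself; the checkpoint id is the one already
 * used in the `AutoModelForSequenceClassification` example earlier in this file, and the label names depend on
 * that model's `config.id2label` mapping.
 *
 * ```javascript
 * import { AutoTokenizer, AutoModelForSequenceClassification } from '@huggingface/transformers';
 *
 * // Load tokenizer and model
 * const model_id = 'Xenova/distilbert-base-uncased-finetuned-sst-2-english';
 * const tokenizer = await AutoTokenizer.from_pretrained(model_id);
 * const model = await AutoModelForSequenceClassification.from_pretrained(model_id);
 *
 * // Run the model; the result is a `SequenceClassifierOutput` with a `logits` Tensor
 * const inputs = tokenizer('I love transformers!');
 * const { logits } = await model(inputs);
 *
 * // `logits` has shape [batch_size, num_labels]; pick the highest-scoring label
 * const scores = logits.tolist()[0];
 * const predicted = scores.indexOf(Math.max(...scores));
 * console.log(model.config.id2label?.[predicted], scores);
 * ```
 */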
transformers.js/src/models.js/0
{ "file_path": "transformers.js/src/models.js", "repo_id": "transformers.js", "token_count": 115347 }
export * from './audio_spectrogram_transformer/feature_extraction_audio_spectrogram_transformer.js'; export * from './clap/feature_extraction_clap.js'; export * from './moonshine/feature_extraction_moonshine.js'; export * from './pyannote/feature_extraction_pyannote.js'; export * from './seamless_m4t/feature_extraction_seamless_m4t.js'; export * from './speecht5/feature_extraction_speecht5.js'; export * from './wav2vec2/feature_extraction_wav2vec2.js'; export * from './wespeaker/feature_extraction_wespeaker.js'; export * from './whisper/feature_extraction_whisper.js'; // For legacy support, ImageFeatureExtractor is an alias for ImageProcessor export { ImageProcessor as ImageFeatureExtractor } from "../base/image_processors_utils.js";
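// Illustrative note on the legacy alias above (a sketch, not additional API surface):
// both imports below resolve to the same class, so existing code that uses
// `ImageFeatureExtractor` keeps working.
//
//     import { ImageFeatureExtractor } from './feature_extractors.js';
//     import { ImageProcessor } from '../base/image_processors_utils.js';
//     console.log(ImageFeatureExtractor === ImageProcessor); // true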
transformers.js/src/models/feature_extractors.js/0
{ "file_path": "transformers.js/src/models/feature_extractors.js", "repo_id": "transformers.js", "token_count": 254 }
import { FeatureExtractor, validate_audio_inputs } from '../../base/feature_extraction_utils.js';
import { Tensor } from '../../utils/tensor.js';
import { max, softmax } from '../../utils/maths.js';

export class PyAnnoteFeatureExtractor extends FeatureExtractor {
    /**
     * Asynchronously extracts features from a given audio using the provided configuration.
     * @param {Float32Array|Float64Array} audio The audio data as a Float32Array/Float64Array.
     * @returns {Promise<{ input_values: Tensor; }>} The extracted input features.
     */
    async _call(audio) {
        validate_audio_inputs(audio, 'PyAnnoteFeatureExtractor');

        if (audio instanceof Float64Array) {
            audio = new Float32Array(audio);
        }

        const shape = [
            1, /* batch_size */
            1, /* num_channels */
            audio.length, /* num_samples */
        ];
        return {
            input_values: new Tensor('float32', audio, shape),
        };
    }

    /**
     * NOTE: Can return fractional values, so apply `Math.ceil` to the result when an integer frame count is needed.
     * @param {number} samples The number of samples in the audio.
     * @returns {number} The number of frames in the audio.
     */
    samples_to_frames(samples) {
        return ((samples - this.config.offset) / this.config.step);
    }

    /**
     * Post-processes the speaker diarization logits output by the model.
     * @param {import('../../utils/tensor.js').Tensor} logits The speaker diarization logits output by the model.
     * @param {number} num_samples Number of samples in the input audio.
     * @returns {Array<Array<{ id: number, start: number, end: number, confidence: number }>>} The post-processed speaker diarization results.
     */
    post_process_speaker_diarization(logits, num_samples) {
        const ratio = (
            num_samples / this.samples_to_frames(num_samples)
        ) / this.config.sampling_rate;

        const results = [];
        for (const scores of logits.tolist()) {
            const accumulated_segments = [];

            let current_speaker = -1;
            for (let i = 0; i < scores.length; ++i) {
                /** @type {number[]} */
                const probabilities = softmax(scores[i]);
                const [score, id] = max(probabilities);
                const [start, end] = [i, i + 1];

                if (id !== current_speaker) {
                    // Speaker has changed
                    current_speaker = id;
                    accumulated_segments.push({ id, start, end, score });
                } else {
                    // Continue the current segment
                    accumulated_segments.at(-1).end = end;
                    accumulated_segments.at(-1).score += score;
                }
            }

            results.push(accumulated_segments.map(
                // Convert frame-space to time-space
                // and compute the confidence
                ({ id, start, end, score }) => ({
                    id,
                    start: start * ratio,
                    end: end * ratio,
                    confidence: score / (end - start),
                })
            ));
        }
        return results;
    }
}
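/**
 * **Example:** A usage sketch for `PyAnnoteFeatureExtractor` and `post_process_speaker_diarization`.
 * The checkpoint id and audio URL below are placeholders for illustration; the model is assumed to be
 * loadable through `AutoModelForAudioFrameClassification` (the `pyannote` entry in the model mappings)
 * and to expose its frame-level scores under the `logits` key.
 *
 * ```javascript
 * import { AutoProcessor, AutoModelForAudioFrameClassification, read_audio } from '@huggingface/transformers';
 *
 * // Load processor (which wraps this feature extractor) and model
 * const model_id = 'onnx-community/pyannote-segmentation-3.0'; // placeholder checkpoint id
 * const processor = await AutoProcessor.from_pretrained(model_id);
 * const model = await AutoModelForAudioFrameClassification.from_pretrained(model_id);
 *
 * // Read audio at the feature extractor's sampling rate and prepare inputs
 * const sampling_rate = processor.feature_extractor.config.sampling_rate;
 * const audio = await read_audio('https://example.com/audio.wav', sampling_rate); // placeholder URL
 * const inputs = await processor(audio);
 *
 * // Run the model and convert frame-level logits into speaker segments
 * const { logits } = await model(inputs);
 * const segments = processor.feature_extractor.post_process_speaker_diarization(logits, audio.length);
 * // [[ { id, start, end, confidence }, ... ]]
 * ```
 */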
transformers.js/src/models/pyannote/feature_extraction_pyannote.js/0
{ "file_path": "transformers.js/src/models/pyannote/feature_extraction_pyannote.js", "repo_id": "transformers.js", "token_count": 1478 }
import { ImageProcessor, } from "../../base/image_processors_utils.js"; export class VitPoseImageProcessor extends ImageProcessor { /** * Transform the heatmaps into keypoint predictions and transform them back to the image. * NOTE: This is a naive implementation and does not include advanced post-processing techniques, * so the results may not be as accurate as the original implementation. * @param {import('../../utils/tensor.js').Tensor} outputs The model outputs. * @param {[number, number, number, number][][]} boxes List or array of bounding boxes for each image. * Each box should be a list of 4 floats representing the bounding box coordinates in COCO format (top_left_x, top_left_y, width, height). * @returns {{ * bbox: [number, number, number, number], * scores: number[], * labels: number[], * keypoints: [number, number][] * }[][]} List of keypoints predictions for each image. */ post_process_pose_estimation(outputs, boxes, { threshold = null, // TODO: // kernel_size = 11, // target_sizes = null, } = {}) { // NOTE: boxes are 3D (batch_size, num_boxes, 4) const heatmaps = outputs.tolist(); const [batch_size, num_classes, height, width] = outputs.dims; const results = []; for (let b = 0; b < batch_size; ++b) { const heatmap = heatmaps[b]; const bboxes = boxes[b]; const batch_results = []; for (let n = 0; n < bboxes.length; ++n) { const bbox = bboxes[n]; const keypoints = []; const scores = []; const labels = []; const xScale = bbox.at(-2) / width; const yScale = bbox.at(-1) / height; for (let c = 0; c < heatmap.length; ++c) { let [xWeightedSum, yWeightedSum] = [0, 0]; let sum = 0; let score = -Infinity; const row = heatmap[c]; for (let y = 0; y < row.length; ++y) { const col = row[y]; for (let x = 0; x < col.length; ++x) { const value = col[x]; sum += value; score = Math.max(score, value); // Get weighted sum of positions // TODO: Determine best offsets xWeightedSum += (x + 0.5) * value; yWeightedSum += (y) * value; } } // Ignore low scores, if threshold is set if (threshold != null && score < threshold) continue; /** @type {[number, number]} */ const keypoint = [ xScale * xWeightedSum / sum, yScale * yWeightedSum / sum, ] keypoints.push(keypoint); labels.push(c); scores.push(score); } batch_results.push({ bbox, scores, labels, keypoints, }); } results.push(batch_results); } return results; } }
transformers.js/src/models/vitpose/image_processing_vitpose.js/0
{ "file_path": "transformers.js/src/models/vitpose/image_processing_vitpose.js", "repo_id": "transformers.js", "token_count": 1796 }
/**
 * @file Core utility functions/classes for Transformers.js.
 *
 * These are only used internally, meaning an end-user shouldn't
 * need to access anything here.
 *
 * @module utils/core
 */

/**
 * @typedef {Object} InitiateProgressInfo
 * @property {'initiate'} status
 * @property {string} name The model id or directory path.
 * @property {string} file The name of the file.
 */

/**
 * @typedef {Object} DownloadProgressInfo
 * @property {'download'} status
 * @property {string} name The model id or directory path.
 * @property {string} file The name of the file.
 */

/**
 * @typedef {Object} ProgressStatusInfo
 * @property {'progress'} status
 * @property {string} name The model id or directory path.
 * @property {string} file The name of the file.
 * @property {number} progress A number between 0 and 100.
 * @property {number} loaded The number of bytes loaded.
 * @property {number} total The total number of bytes to be loaded.
 */

/**
 * @typedef {Object} DoneProgressInfo
 * @property {'done'} status
 * @property {string} name The model id or directory path.
 * @property {string} file The name of the file.
 */

/**
 * @typedef {Object} ReadyProgressInfo
 * @property {'ready'} status
 * @property {string} task The loaded task.
 * @property {string} model The loaded model.
 */

/**
 * @typedef {InitiateProgressInfo | DownloadProgressInfo | ProgressStatusInfo | DoneProgressInfo | ReadyProgressInfo} ProgressInfo
 */

/**
 * A callback function that is called with progress information.
 * @callback ProgressCallback
 * @param {ProgressInfo} progressInfo
 * @returns {void}
 */

/**
 * Helper function to dispatch progress callbacks.
 *
 * @param {ProgressCallback | null | undefined} progress_callback The progress callback function to dispatch.
 * @param {ProgressInfo} data The data to pass to the progress callback function.
 * @returns {void}
 * @private
 */
export function dispatchCallback(progress_callback, data) {
    if (progress_callback) progress_callback(data);
}

/**
 * Reverses the keys and values of an object.
 *
 * @param {Object} data The object to reverse.
 * @returns {Object} The reversed object.
 * @see https://ultimatecourses.com/blog/reverse-object-keys-and-values-in-javascript
 */
export function reverseDictionary(data) {
    // https://ultimatecourses.com/blog/reverse-object-keys-and-values-in-javascript
    return Object.fromEntries(Object.entries(data).map(([key, value]) => [value, key]));
}

/**
 * Escapes regular expression special characters from a string by replacing them with their escaped counterparts.
 *
 * @param {string} string The string to escape.
 * @returns {string} The escaped string.
 */
export function escapeRegExp(string) {
    return string.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'); // $& means the whole matched string
}

/**
 * Check if a value is a typed array.
 * @param {*} val The value to check.
 * @returns {boolean} True if the value is a `TypedArray`, false otherwise.
 *
 * Adapted from https://stackoverflow.com/a/71091338/13989043
 */
export function isTypedArray(val) {
    return val?.prototype?.__proto__?.constructor?.name === 'TypedArray';
}

/**
 * Check if a value is an integer.
 * @param {*} x The value to check.
 * @returns {boolean} True if the value is an integer, false otherwise.
 */
export function isIntegralNumber(x) {
    return Number.isInteger(x) || typeof x === 'bigint'
}

/**
 * Determine if a provided width or height is nullish.
 * @param {*} x The value to check.
 * @returns {boolean} True if the value is `null`, `undefined` or `-1`, false otherwise.
*/ export function isNullishDimension(x) { return x === null || x === undefined || x === -1; } /** * Calculates the dimensions of a nested array. * * @param {any[]} arr The nested array to calculate dimensions for. * @returns {number[]} An array containing the dimensions of the input array. */ export function calculateDimensions(arr) { const dimensions = []; let current = arr; while (Array.isArray(current)) { dimensions.push(current.length); current = current[0]; } return dimensions; } /** * Replicate python's .pop() method for objects. * @param {Object} obj The object to pop from. * @param {string} key The key to pop. * @param {*} defaultValue The default value to return if the key does not exist. * @returns {*} The value of the popped key. * @throws {Error} If the key does not exist and no default value is provided. */ export function pop(obj, key, defaultValue = undefined) { const value = obj[key]; if (value !== undefined) { delete obj[key]; return value; } if (defaultValue === undefined) { throw Error(`Key ${key} does not exist in object.`) } return defaultValue; } /** * Efficiently merge arrays, creating a new copy. * Adapted from https://stackoverflow.com/a/6768642/13989043 * @param {Array[]} arrs Arrays to merge. * @returns {Array} The merged array. */ export function mergeArrays(...arrs) { return Array.prototype.concat.apply([], arrs); } /** * Compute the Cartesian product of given arrays * @param {...Array} a Arrays to compute the product * @returns {Array} Returns the computed Cartesian product as an array * @private */ export function product(...a) { // Cartesian product of items // Adapted from https://stackoverflow.com/a/43053803 return a.reduce((a, b) => a.flatMap(d => b.map(e => [d, e]))); } /** * Calculates the index offset for a given index and window size. * @param {number} i The index. * @param {number} w The window size. * @returns {number} The index offset. */ export function calculateReflectOffset(i, w) { return Math.abs((i + w) % (2 * w) - w); } /** * Save blob file on the web. * @param {string} path The path to save the blob to * @param {Blob} blob The blob to save */ export function saveBlob(path, blob){ // Convert the canvas content to a data URL const dataURL = URL.createObjectURL(blob); // Create an anchor element with the data URL as the href attribute const downloadLink = document.createElement('a'); downloadLink.href = dataURL; // Set the download attribute to specify the desired filename for the downloaded image downloadLink.download = path; // Trigger the download downloadLink.click(); // Clean up: remove the anchor element from the DOM downloadLink.remove(); // Revoke the Object URL to free up memory URL.revokeObjectURL(dataURL); } /** * * @param {Object} o * @param {string[]} props * @returns {Object} */ export function pick(o, props) { return Object.assign( {}, ...props.map((prop) => { if (o[prop] !== undefined) { return { [prop]: o[prop] }; } }) ); } /** * Calculate the length of a string, taking multi-byte characters into account. * This mimics the behavior of Python's `len` function. * @param {string} s The string to calculate the length of. * @returns {number} The length of the string. */ export function len(s) { let length = 0; for (const c of s) ++length; return length; } /** * Count the occurrences of a value in an array or string. * This mimics the behavior of Python's `count` method. * @param {any[]|string} arr The array or string to search. * @param {any} value The value to count. 
*/ export function count(arr, value) { let count = 0; for (const v of arr) { if (v === value) ++count; } return count; }
transformers.js/src/utils/core.js/0
{ "file_path": "transformers.js/src/utils/core.js", "repo_id": "transformers.js", "token_count": 2392 }
import { GPT2Tokenizer, GPT2LMHeadModel } from "../../../src/transformers.js"; import { MAX_MODEL_LOAD_TIME, MAX_TEST_EXECUTION_TIME, MAX_MODEL_DISPOSE_TIME, DEFAULT_MODEL_OPTIONS } from "../../init.js"; export default () => { describe("GPT2LMHeadModel", () => { const model_id = "hf-internal-testing/tiny-random-GPT2LMHeadModel"; /** @type {GPT2LMHeadModel} */ let model; /** @type {GPT2Tokenizer} */ let tokenizer; beforeAll(async () => { model = await GPT2LMHeadModel.from_pretrained(model_id, DEFAULT_MODEL_OPTIONS); tokenizer = await GPT2Tokenizer.from_pretrained(model_id); tokenizer.padding_side = "left"; }, MAX_MODEL_LOAD_TIME); it( "batch_size=1", async () => { const inputs = tokenizer("hello"); const outputs = await model.generate({ ...inputs, max_length: 10, }); expect(outputs.tolist()).toEqual([[258n, 863n, 79n, 79n, 79n, 79n, 79n, 79n, 79n, 243n]]); }, MAX_TEST_EXECUTION_TIME, ); it( "batch_size>1", async () => { const inputs = tokenizer(["hello", "hello world"], { padding: true }); const outputs = await model.generate({ ...inputs, max_length: 10, }); expect(outputs.tolist()).toEqual([ [0n, 0n, 258n, 863n, 79n, 79n, 79n, 79n, 79n, 79n], [258n, 863n, 79n, 269n, 813n, 813n, 813n, 813n, 813n, 813n], ]); }, MAX_TEST_EXECUTION_TIME, ); afterAll(async () => { await model?.dispose(); }, MAX_MODEL_DISPOSE_TIME); }); };
transformers.js/tests/models/gpt2/test_modeling_gpt2.js/0
{ "file_path": "transformers.js/tests/models/gpt2/test_modeling_gpt2.js", "repo_id": "transformers.js", "token_count": 769 }
import { LlamaTokenizer, LlamaForCausalLM } from "../../../src/transformers.js"; import { MAX_MODEL_LOAD_TIME, MAX_TEST_EXECUTION_TIME, MAX_MODEL_DISPOSE_TIME, MAX_TEST_TIME, DEFAULT_MODEL_OPTIONS } from "../../init.js"; export default () => { describe("LlamaForCausalLM", () => { const model_id = "hf-internal-testing/tiny-random-LlamaForCausalLM"; /** @type {LlamaForCausalLM} */ let model; /** @type {LlamaTokenizer} */ let tokenizer; beforeAll(async () => { model = await LlamaForCausalLM.from_pretrained(model_id, DEFAULT_MODEL_OPTIONS); tokenizer = await LlamaTokenizer.from_pretrained(model_id); }, MAX_MODEL_LOAD_TIME); it( "batch_size=1", async () => { const inputs = tokenizer("hello"); const outputs = await model.generate({ ...inputs, max_length: 10, }); expect(outputs.tolist()).toEqual([[1n, 22172n, 18547n, 8143n, 22202n, 9456n, 17213n, 15330n, 26591n, 15721n]]); }, MAX_TEST_EXECUTION_TIME, ); it( "batch_size>1", async () => { const inputs = tokenizer(["hello", "hello world"], { padding: true }); const outputs = await model.generate({ ...inputs, max_length: 10, }); expect(outputs.tolist()).toEqual([ [0n, 1n, 22172n, 18547n, 8143n, 22202n, 9456n, 17213n, 15330n, 26591n], [1n, 22172n, 3186n, 24786n, 19169n, 20222n, 29993n, 27146n, 27426n, 24562n], ]); }, MAX_TEST_EXECUTION_TIME, ); afterAll(async () => { await model?.dispose(); }, MAX_MODEL_DISPOSE_TIME); }); describe("LlamaForCausalLM (onnxruntime-genai)", () => { const model_id = "onnx-community/tiny-random-LlamaForCausalLM-ONNX"; /** @type {LlamaTokenizer} */ let tokenizer; let inputs; beforeAll(async () => { tokenizer = await LlamaTokenizer.from_pretrained(model_id); inputs = tokenizer("hello"); }, MAX_MODEL_LOAD_TIME); const dtypes = ["fp32", "fp16", "q4", "q4f16"]; for (const dtype of dtypes) { it( `dtype=${dtype}`, async () => { /** @type {LlamaForCausalLM} */ const model = await LlamaForCausalLM.from_pretrained(model_id, { ...DEFAULT_MODEL_OPTIONS, dtype, }); const outputs = await model.generate({ ...inputs, max_length: 5, }); expect(outputs.tolist()).toEqual([[128000n, 15339n, 15339n, 15339n, 15339n]]); await model?.dispose(); }, MAX_TEST_TIME, ); } }); };
transformers.js/tests/models/llama/test_modeling_llama.js/0
{ "file_path": "transformers.js/tests/models/llama/test_modeling_llama.js", "repo_id": "transformers.js", "token_count": 1273 }
import { AutoImageProcessor, SamImageProcessor } from "../../../src/transformers.js"; import { load_cached_image } from "../../asset_cache.js"; import { MAX_PROCESSOR_LOAD_TIME, MAX_TEST_EXECUTION_TIME } from "../../init.js"; export default () => { // SamImageProcessor // - tests normal padding (do_pad=true, pad_size={"height":1024,"width":1024}) // - In addition to the image, pass in a list of points describe("SamImageProcessor", () => { const model_id = "Xenova/sam-vit-base"; /** @type {SamImageProcessor} */ let processor; beforeAll(async () => { processor = await AutoImageProcessor.from_pretrained(model_id); }, MAX_PROCESSOR_LOAD_TIME); it( "without input points", async () => { const image = await load_cached_image("pattern_3x3"); const { pixel_values, original_sizes, reshaped_input_sizes } = await processor(image); expect(pixel_values.dims).toEqual([1, 3, 1024, 1024]); expect(pixel_values.mean().item()).toBeCloseTo(-0.4505715670146813, 6); expect(original_sizes).toEqual([[3, 3]]); expect(reshaped_input_sizes).toEqual([[1024, 1024]]); }, MAX_TEST_EXECUTION_TIME, ); it( "with input points", async () => { const image = await load_cached_image("pattern_3x3"); const { original_sizes, reshaped_input_sizes, input_points } = await processor(image, { input_points: [[[1, 2]]], }); expect(original_sizes).toEqual([[3, 3]]); expect(reshaped_input_sizes).toEqual([[1024, 1024]]); expect(input_points.tolist()).toBeCloseToNested([[[[341.3333, 682.6667]]]], 4); }, MAX_TEST_EXECUTION_TIME, ); it( "multiple points with labels", async () => { const image = await load_cached_image("pattern_3x3"); const { original_sizes, reshaped_input_sizes, input_points, input_labels } = await processor(image, { input_points: [ [ [1, 2], [2, 1], ], ], input_labels: [[1, 0]], }); expect(original_sizes).toEqual([[3, 3]]); expect(reshaped_input_sizes).toEqual([[1024, 1024]]); expect(input_points.tolist()).toBeCloseToNested( [ [ [ [341.3333, 682.6667], [682.6667, 341.3333], ], ], ], 4, ); expect(input_labels.tolist()).toEqual([[[1n, 0n]]]); }, MAX_TEST_EXECUTION_TIME, ); it( "with input boxes", async () => { const image = await load_cached_image("pattern_3x3"); const { original_sizes, reshaped_input_sizes, input_boxes } = await processor(image, { input_boxes: [[[0, 1, 2, 2]]], }); expect(original_sizes).toEqual([[3, 3]]); expect(reshaped_input_sizes).toEqual([[1024, 1024]]); expect(input_boxes.tolist()).toBeCloseToNested([[[0, 341.3333, 682.6667, 682.6667]]], 4); }, MAX_TEST_EXECUTION_TIME, ); }); };
transformers.js/tests/models/sam/test_image_processing_sam.js/0
{ "file_path": "transformers.js/tests/models/sam/test_image_processing_sam.js", "repo_id": "transformers.js", "token_count": 1475 }
import { WhisperTokenizer } from "../../../src/tokenizers.js"; import { BASE_TEST_STRINGS, WHISPER_TEST_STRINGS } from "../test_strings.js"; import { compare } from "../../test_utils.js"; export const TOKENIZER_CLASS = WhisperTokenizer; export const TEST_CONFIG = { "onnx-community/whisper-tiny.en": { SIMPLE: { text: BASE_TEST_STRINGS.SIMPLE, tokens: ["How", "\u0120are", "\u0120you", "\u0120doing", "?"], ids: [50257, 50362, 2437, 389, 345, 1804, 30, 50256], decoded: "<|startoftranscript|><|notimestamps|>How are you doing?<|endoftext|>", }, SIMPLE_WITH_PUNCTUATION: { text: BASE_TEST_STRINGS.SIMPLE_WITH_PUNCTUATION, tokens: ["You", "\u0120should", "'ve", "\u0120done", "\u0120this"], ids: [50257, 50362, 1639, 815, 1053, 1760, 428, 50256], decoded: "<|startoftranscript|><|notimestamps|>You should've done this<|endoftext|>", }, NUMBERS: { text: BASE_TEST_STRINGS.NUMBERS, tokens: ["01", "23", "45", "67", "89", "\u01200", "\u01201", "\u01202", "\u01203", "\u01204", "\u01205", "\u01206", "\u01207", "\u01208", "\u01209", "\u012010", "\u0120100", "\u01201000"], ids: [50257, 50362, 486, 1954, 2231, 3134, 4531, 657, 352, 362, 513, 604, 642, 718, 767, 807, 860, 838, 1802, 8576, 50256], decoded: "<|startoftranscript|><|notimestamps|>0123456789 0 1 2 3 4 5 6 7 8 9 10 100 1000<|endoftext|>", }, TEXT_WITH_NUMBERS: { text: BASE_TEST_STRINGS.TEXT_WITH_NUMBERS, tokens: ["The", "\u0120company", "\u0120was", "\u0120founded", "\u0120in", "\u01202016", "."], ids: [50257, 50362, 464, 1664, 373, 9393, 287, 1584, 13, 50256], decoded: "<|startoftranscript|><|notimestamps|>The company was founded in 2016.<|endoftext|>", }, PUNCTUATION: { text: BASE_TEST_STRINGS.PUNCTUATION, tokens: ["A", "\u010a", "'ll", "\u0120!!", "to", "?'", "d", "''", "d", "\u0120of", ",", "\u0120can", "'t", "."], ids: [50257, 50362, 32, 198, 1183, 37867, 1462, 8348, 67, 7061, 67, 286, 11, 460, 470, 13, 50256], decoded: "<|startoftranscript|><|notimestamps|>A\n'll!!to?'d''d of, can't.<|endoftext|>", }, PYTHON_CODE: { text: BASE_TEST_STRINGS.PYTHON_CODE, tokens: ["def", "\u0120main", "():", "\u010a", "\u0109", "pass"], ids: [50257, 50362, 4299, 1388, 33529, 198, 197, 6603, 50256], decoded: "<|startoftranscript|><|notimestamps|>def main():\n\tpass<|endoftext|>", }, JAVASCRIPT_CODE: { text: BASE_TEST_STRINGS.JAVASCRIPT_CODE, tokens: ["let", "\u0120a", "\u0120=", "\u0120obj", ".", "to", "String", "();", "\u010a", "to", "String", "();"], ids: [50257, 50362, 1616, 257, 796, 26181, 13, 1462, 10100, 9783, 198, 1462, 10100, 9783, 50256], decoded: "<|startoftranscript|><|notimestamps|>let a = obj.toString();\ntoString();<|endoftext|>", }, NEWLINES: { text: BASE_TEST_STRINGS.NEWLINES, tokens: ["This", "\u010a", "\u010a", "is", "\u010a", "a", "\u010a", "test", "."], ids: [50257, 50362, 1212, 198, 198, 271, 198, 64, 198, 9288, 13, 50256], decoded: "<|startoftranscript|><|notimestamps|>This\n\nis\na\ntest.<|endoftext|>", }, BASIC: { text: BASE_TEST_STRINGS.BASIC, tokens: ["UN", "want", "\u00c3\u00a9", "d", ",", "running"], ids: [50257, 50362, 4944, 42949, 2634, 67, 11, 20270, 50256], decoded: "<|startoftranscript|><|notimestamps|>UNwant\u00e9d,running<|endoftext|>", }, CONTROL_TOKENS: { text: BASE_TEST_STRINGS.CONTROL_TOKENS, tokens: ["1", "\u0100", "2", "\u00ef\u00bf\u00bd", "3"], ids: [50257, 50362, 16, 188, 17, 4210, 18, 50256], decoded: "<|startoftranscript|><|notimestamps|>1\u00002\ufffd3<|endoftext|>", }, HELLO_WORLD_TITLECASE: { text: BASE_TEST_STRINGS.HELLO_WORLD_TITLECASE, tokens: ["Hello", "\u0120World"], ids: [50257, 50362, 15496, 2159, 50256], 
decoded: "<|startoftranscript|><|notimestamps|>Hello World<|endoftext|>", }, HELLO_WORLD_LOWERCASE: { text: BASE_TEST_STRINGS.HELLO_WORLD_LOWERCASE, tokens: ["hello", "\u0120world"], ids: [50257, 50362, 31373, 995, 50256], decoded: "<|startoftranscript|><|notimestamps|>hello world<|endoftext|>", }, CHINESE_ONLY: { text: BASE_TEST_STRINGS.CHINESE_ONLY, tokens: ["\u00e7\u0136\u0141", "\u00e6", "\u00b4", "\u00bb", "\u00e7\u013c\u0126", "\u00e7\u013e", "\u0141", "\u00e8", "\u00b0", "\u013d", "\u00e6\u013a\u00af"], ids: [50257, 50362, 37955, 162, 112, 119, 21410, 40367, 253, 164, 108, 249, 42468, 50256], decoded: "<|startoftranscript|><|notimestamps|>\u751f\u6d3b\u7684\u771f\u8c1b\u662f<|endoftext|>", }, LEADING_SPACE: { text: BASE_TEST_STRINGS.LEADING_SPACE, tokens: ["\u0120", "\u0120", "\u0120leading", "\u0120space"], ids: [50257, 50362, 220, 220, 3756, 2272, 50256], decoded: "<|startoftranscript|><|notimestamps|> leading space<|endoftext|>", }, TRAILING_SPACE: { text: BASE_TEST_STRINGS.TRAILING_SPACE, tokens: ["tra", "iling", "\u0120space", "\u0120", "\u0120", "\u0120"], ids: [50257, 50362, 9535, 4386, 2272, 220, 220, 220, 50256], decoded: "<|startoftranscript|><|notimestamps|>trailing space <|endoftext|>", }, DOUBLE_SPACE: { text: BASE_TEST_STRINGS.DOUBLE_SPACE, tokens: ["Hi", "\u0120", "\u0120Hello"], ids: [50257, 50362, 17250, 220, 18435, 50256], decoded: "<|startoftranscript|><|notimestamps|>Hi Hello<|endoftext|>", }, CURRENCY: { text: BASE_TEST_STRINGS.CURRENCY, tokens: ["test", "\u0120$", "1", "\u0120R", "2", "\u0120#", "3", "\u0120\u00e2\u0124\u00ac", "4", "\u0120\u00c2\u00a3", "5", "\u0120\u00c2\u00a5", "6", "\u0120\u00e2", "\u0124", "\u00a3", "7", "\u0120\u00e2", "\u0124", "\u00b9", "8", "\u0120\u00e2", "\u0124", "\u00b1", "9", "\u0120test"], ids: [50257, 50362, 9288, 720, 16, 371, 17, 1303, 18, 10432, 19, 4248, 20, 38221, 21, 2343, 224, 96, 22, 2343, 224, 117, 23, 2343, 224, 109, 24, 1332, 50256], decoded: "<|startoftranscript|><|notimestamps|>test $1 R2 #3 \u20ac4 \u00a35 \u00a56 \u20a37 \u20b98 \u20b19 test<|endoftext|>", }, CURRENCY_WITH_DECIMALS: { text: BASE_TEST_STRINGS.CURRENCY_WITH_DECIMALS, tokens: ["I", "\u0120bought", "\u0120an", "\u0120apple", "\u0120for", "\u0120$", "1", ".", "00", "\u0120at", "\u0120the", "\u0120store", "."], ids: [50257, 50362, 40, 5839, 281, 17180, 329, 720, 16, 13, 405, 379, 262, 3650, 13, 50256], decoded: "<|startoftranscript|><|notimestamps|>I bought an apple for $1.00 at the store.<|endoftext|>", }, ELLIPSIS: { text: BASE_TEST_STRINGS.ELLIPSIS, tokens: ["you", "\u00e2\u0122\u00a6", "\u0120", "\u0120"], ids: [50257, 50362, 5832, 1399, 220, 220, 50256], decoded: "<|startoftranscript|><|notimestamps|>you\u2026 <|endoftext|>", }, TEXT_WITH_ESCAPE_CHARACTERS: { text: BASE_TEST_STRINGS.TEXT_WITH_ESCAPE_CHARACTERS, tokens: ["you", "\u00e2\u0122\u00a6", "\u00c2\u0142\u00c2\u0142"], ids: [50257, 50362, 5832, 1399, 4603, 50256], decoded: "<|startoftranscript|><|notimestamps|>you\u2026\u00a0\u00a0<|endoftext|>", }, TEXT_WITH_ESCAPE_CHARACTERS_2: { text: BASE_TEST_STRINGS.TEXT_WITH_ESCAPE_CHARACTERS_2, tokens: ["you", "\u00e2\u0122\u00a6", "\u00c2\u0142", "\u00c2\u0142", "you", "\u00e2\u0122\u00a6", "\u00c2\u0142\u00c2\u0142"], ids: [50257, 50362, 5832, 1399, 1849, 1849, 5832, 1399, 4603, 50256], decoded: "<|startoftranscript|><|notimestamps|>you\u2026\u00a0\u00a0you\u2026\u00a0\u00a0<|endoftext|>", }, TILDE_NORMALIZATION: { text: BASE_TEST_STRINGS.TILDE_NORMALIZATION, tokens: ["we", "ird", "\u0120\u00ef", "\u00bd", "\u0140", "\u0120edge", "\u0120\u00ef", 
"\u00bd", "\u0140", "\u0120case"], ids: [50257, 50362, 732, 1447, 27332, 121, 252, 5743, 27332, 121, 252, 1339, 50256], decoded: "<|startoftranscript|><|notimestamps|>weird \uff5e edge \uff5e case<|endoftext|>", }, SPIECE_UNDERSCORE: { text: BASE_TEST_STRINGS.SPIECE_UNDERSCORE, tokens: ["\u00e2\u0138", "\u0123", "This", "\u0120\u00e2\u0138", "\u0123", "is", "\u0120\u00e2\u0138", "\u0123", "a", "\u0120\u00e2\u0138", "\u0123", "test", "\u0120\u00e2\u0138", "\u0123", "."], ids: [50257, 50362, 5008, 223, 1212, 11019, 223, 271, 11019, 223, 64, 11019, 223, 9288, 11019, 223, 13, 50256], decoded: "<|startoftranscript|><|notimestamps|>\u2581This \u2581is \u2581a \u2581test \u2581.<|endoftext|>", }, SPECIAL_TOKENS: { text: WHISPER_TEST_STRINGS.SPECIAL_TOKENS, tokens: ["\u0120", "\u0120", "\u0120", "<|startoftranscript|>", "\u0120", "<|en|>", "\u0120", "\u0120", "\u0120"], ids: [50257, 50362, 220, 220, 220, 50257, 220, 50258, 220, 220, 220, 50256], decoded: "<|startoftranscript|><|notimestamps|> <|startoftranscript|> <|en|> <|endoftext|>", }, }, "distil-whisper/distil-large-v3": { NUMBERS: { text: BASE_TEST_STRINGS.NUMBERS, tokens: ["0", "12", "3", "45", "67", "89", "\u01200", "\u01201", "\u01202", "\u01203", "\u01204", "\u01205", "\u01206", "\u01207", "\u01208", "\u01209", "\u012010", "\u0120100", "\u01201000"], ids: [50258, 50364, 15, 4762, 18, 8465, 22452, 21115, 1958, 502, 568, 805, 1017, 1025, 1386, 1614, 1649, 1722, 1266, 2319, 9714, 50257], decoded: "<|startoftranscript|><|notimestamps|>0123456789 0 1 2 3 4 5 6 7 8 9 10 100 1000<|endoftext|>", }, PYTHON_CODE: { text: BASE_TEST_STRINGS.PYTHON_CODE, tokens: ["def", "\u0120main", "(", "):", "\u010a", "\u0109", "pass"], ids: [50258, 50364, 20595, 2135, 7, 4507, 198, 197, 9216, 50257], decoded: "<|startoftranscript|><|notimestamps|>def main():\n\tpass<|endoftext|>", }, JAVASCRIPT_CODE: { text: BASE_TEST_STRINGS.JAVASCRIPT_CODE, tokens: ["let", "\u0120a", "\u0120=", "\u0120ob", "j", ".", "to", "St", "ring", "(", ");", "\u010a", "to", "St", "ring", "(", ");"], ids: [50258, 50364, 2631, 257, 6585, 1111, 73, 13, 1353, 4520, 2937, 7, 34446, 198, 1353, 4520, 2937, 7, 34446, 50257], decoded: "<|startoftranscript|><|notimestamps|>let a = obj.toString();\ntoString();<|endoftext|>", }, BASIC: { text: BASE_TEST_STRINGS.BASIC, tokens: ["UN", "w", "ant", "\u00c3\u00a9d", ",", "running"], ids: [50258, 50364, 3979, 86, 394, 7811, 11, 45482, 50257], decoded: "<|startoftranscript|><|notimestamps|>UNwant\u00e9d,running<|endoftext|>", }, HELLO_WORLD_LOWERCASE: { text: BASE_TEST_STRINGS.HELLO_WORLD_LOWERCASE, tokens: ["he", "llo", "\u0120world"], ids: [50258, 50364, 675, 1913, 1002, 50257], decoded: "<|startoftranscript|><|notimestamps|>hello world<|endoftext|>", }, CHINESE_ONLY: { text: BASE_TEST_STRINGS.CHINESE_ONLY, tokens: ["\u00e7\u0136\u0141\u00e6\u00b4\u00bb", "\u00e7\u013c\u0126", "\u00e7\u013e\u0141", "\u00e8\u00b0", "\u013d", "\u00e6\u013a\u00af"], ids: [50258, 50364, 49958, 1546, 6303, 8897, 249, 1541, 50257], decoded: "<|startoftranscript|><|notimestamps|>\u751f\u6d3b\u7684\u771f\u8c1b\u662f<|endoftext|>", }, CURRENCY: { text: BASE_TEST_STRINGS.CURRENCY, tokens: ["test", "\u0120$", "1", "\u0120R", "2", "\u0120#", "3", "\u0120\u00e2\u0124\u00ac", "4", "\u0120\u00c2\u00a3", "5", "\u0120\u00c2", "\u00a5", "6", "\u0120\u00e2", "\u0124", "\u00a3", "7", "\u0120\u00e2", "\u0124", "\u00b9", "8", "\u0120\u00e2", "\u0124", "\u00b1", "9", "\u0120test"], ids: [50258, 50364, 31636, 1848, 16, 497, 17, 3536, 18, 17450, 19, 14378, 20, 1815, 98, 21, 672, 224, 96, 22, 672, 
224, 117, 23, 672, 224, 109, 24, 1500, 50257], decoded: "<|startoftranscript|><|notimestamps|>test $1 R2 #3 \u20ac4 \u00a35 \u00a56 \u20a37 \u20b98 \u20b19 test<|endoftext|>", }, TEXT_WITH_ESCAPE_CHARACTERS: { text: BASE_TEST_STRINGS.TEXT_WITH_ESCAPE_CHARACTERS, tokens: ["you", "\u00e2\u0122\u00a6", "\u00c2", "\u0142", "\u00c2", "\u0142"], ids: [50258, 50364, 5616, 1260, 126, 254, 126, 254, 50257], decoded: "<|startoftranscript|><|notimestamps|>you\u2026\u00a0\u00a0<|endoftext|>", }, TEXT_WITH_ESCAPE_CHARACTERS_2: { text: BASE_TEST_STRINGS.TEXT_WITH_ESCAPE_CHARACTERS_2, tokens: ["you", "\u00e2\u0122\u00a6", "\u00c2", "\u0142", "\u00c2", "\u0142", "you", "\u00e2\u0122\u00a6", "\u00c2", "\u0142", "\u00c2", "\u0142"], ids: [50258, 50364, 5616, 1260, 126, 254, 126, 254, 5616, 1260, 126, 254, 126, 254, 50257], decoded: "<|startoftranscript|><|notimestamps|>you\u2026\u00a0\u00a0you\u2026\u00a0\u00a0<|endoftext|>", }, }, "distil-whisper/distil-large-v2": { SIMPLE: { text: BASE_TEST_STRINGS.SIMPLE, tokens: ["How", "\u0120are", "\u0120you", "\u0120doing", "?"], ids: [50258, 50259, 50359, 50363, 6462, 366, 291, 884, 30, 50257], decoded: "<|startoftranscript|><|en|><|transcribe|><|notimestamps|>How are you doing?<|endoftext|>", }, SIMPLE_WITH_PUNCTUATION: { text: BASE_TEST_STRINGS.SIMPLE_WITH_PUNCTUATION, tokens: ["You", "\u0120should", "'ve", "\u0120done", "\u0120", "this"], ids: [50258, 50259, 50359, 50363, 3223, 820, 600, 1096, 220, 11176, 50257], decoded: "<|startoftranscript|><|en|><|transcribe|><|notimestamps|>You should've done this<|endoftext|>", }, NUMBERS: { text: BASE_TEST_STRINGS.NUMBERS, tokens: ["0", "12", "3", "45", "67", "89", "\u01200", "\u01201", "\u01202", "\u01203", "\u01204", "\u01205", "\u01206", "\u01207", "\u01208", "\u01209", "\u012010", "\u0120100", "\u01201000"], ids: [50258, 50259, 50359, 50363, 15, 4762, 18, 8465, 22452, 21115, 1958, 502, 568, 805, 1017, 1025, 1386, 1614, 1649, 1722, 1266, 2319, 9714, 50257], decoded: "<|startoftranscript|><|en|><|transcribe|><|notimestamps|>0123456789 0 1 2 3 4 5 6 7 8 9 10 100 1000<|endoftext|>", }, TEXT_WITH_NUMBERS: { text: BASE_TEST_STRINGS.TEXT_WITH_NUMBERS, tokens: ["The", "\u0120company", "\u0120was", "\u0120founded", "\u0120in", "\u01202016", "."], ids: [50258, 50259, 50359, 50363, 2278, 2237, 390, 13234, 294, 6549, 13, 50257], decoded: "<|startoftranscript|><|en|><|transcribe|><|notimestamps|>The company was founded in 2016.<|endoftext|>", }, PUNCTUATION: { text: BASE_TEST_STRINGS.PUNCTUATION, tokens: ["A", "\u010a", "'ll", "\u0120!!", "to", "?'", "d", "''", "d", "\u0120of", ",", "\u0120can", "'t", "."], ids: [50258, 50259, 50359, 50363, 32, 198, 603, 15138, 1353, 8569, 67, 15025, 67, 295, 11, 393, 380, 13, 50257], decoded: "<|startoftranscript|><|en|><|transcribe|><|notimestamps|>A\n'll!!to?'d''d of, can't.<|endoftext|>", }, PYTHON_CODE: { text: BASE_TEST_STRINGS.PYTHON_CODE, tokens: ["def", "\u0120main", "(", "):", "\u010a", "\u0109", "pass"], ids: [50258, 50259, 50359, 50363, 20595, 2135, 7, 4507, 198, 197, 9216, 50257], decoded: "<|startoftranscript|><|en|><|transcribe|><|notimestamps|>def main():\n\tpass<|endoftext|>", }, JAVASCRIPT_CODE: { text: BASE_TEST_STRINGS.JAVASCRIPT_CODE, tokens: ["let", "\u0120a", "\u0120=", "\u0120ob", "j", ".", "to", "St", "ring", "(", ");", "\u010a", "to", "St", "ring", "(", ");"], ids: [50258, 50259, 50359, 50363, 2631, 257, 6585, 1111, 73, 13, 1353, 4520, 2937, 7, 34446, 198, 1353, 4520, 2937, 7, 34446, 50257], decoded: "<|startoftranscript|><|en|><|transcribe|><|notimestamps|>let a = 
obj.toString();\ntoString();<|endoftext|>", }, NEWLINES: { text: BASE_TEST_STRINGS.NEWLINES, tokens: ["This", "\u010a", "\u010a", "is", "\u010a", "a", "\u010a", "test", "."], ids: [50258, 50259, 50359, 50363, 5723, 198, 198, 271, 198, 64, 198, 31636, 13, 50257], decoded: "<|startoftranscript|><|en|><|transcribe|><|notimestamps|>This\n\nis\na\ntest.<|endoftext|>", }, BASIC: { text: BASE_TEST_STRINGS.BASIC, tokens: ["UN", "w", "ant", "\u00c3\u00a9d", ",", "running"], ids: [50258, 50259, 50359, 50363, 3979, 86, 394, 7811, 11, 45482, 50257], decoded: "<|startoftranscript|><|en|><|transcribe|><|notimestamps|>UNwant\u00e9d,running<|endoftext|>", }, CONTROL_TOKENS: { text: BASE_TEST_STRINGS.CONTROL_TOKENS, tokens: ["1", "\u0100", "2", "\u00ef\u00bf\u00bd", "3"], ids: [50258, 50259, 50359, 50363, 16, 188, 17, 5342, 18, 50257], decoded: "<|startoftranscript|><|en|><|transcribe|><|notimestamps|>1\u00002\ufffd3<|endoftext|>", }, HELLO_WORLD_TITLECASE: { text: BASE_TEST_STRINGS.HELLO_WORLD_TITLECASE, tokens: ["Hello", "\u0120World"], ids: [50258, 50259, 50359, 50363, 15947, 3937, 50257], decoded: "<|startoftranscript|><|en|><|transcribe|><|notimestamps|>Hello World<|endoftext|>", }, HELLO_WORLD_LOWERCASE: { text: BASE_TEST_STRINGS.HELLO_WORLD_LOWERCASE, tokens: ["he", "llo", "\u0120world"], ids: [50258, 50259, 50359, 50363, 675, 1913, 1002, 50257], decoded: "<|startoftranscript|><|en|><|transcribe|><|notimestamps|>hello world<|endoftext|>", }, CHINESE_ONLY: { text: BASE_TEST_STRINGS.CHINESE_ONLY, tokens: ["\u00e7\u0136\u0141\u00e6\u00b4\u00bb", "\u00e7\u013c\u0126", "\u00e7\u013e\u0141", "\u00e8\u00b0", "\u013d", "\u00e6\u013a\u00af"], ids: [50258, 50259, 50359, 50363, 49958, 1546, 6303, 8897, 249, 1541, 50257], decoded: "<|startoftranscript|><|en|><|transcribe|><|notimestamps|>\u751f\u6d3b\u7684\u771f\u8c1b\u662f<|endoftext|>", }, LEADING_SPACE: { text: BASE_TEST_STRINGS.LEADING_SPACE, tokens: ["\u0120", "\u0120", "\u0120leading", "\u0120space"], ids: [50258, 50259, 50359, 50363, 220, 220, 5775, 1901, 50257], decoded: "<|startoftranscript|><|en|><|transcribe|><|notimestamps|> leading space<|endoftext|>", }, TRAILING_SPACE: { text: BASE_TEST_STRINGS.TRAILING_SPACE, tokens: ["tra", "iling", "\u0120space", "\u0120", "\u0120", "\u0120"], ids: [50258, 50259, 50359, 50363, 17227, 4883, 1901, 220, 220, 220, 50257], decoded: "<|startoftranscript|><|en|><|transcribe|><|notimestamps|>trailing space <|endoftext|>", }, DOUBLE_SPACE: { text: BASE_TEST_STRINGS.DOUBLE_SPACE, tokens: ["Hi", "\u0120", "\u0120Hello"], ids: [50258, 50259, 50359, 50363, 17155, 220, 2425, 50257], decoded: "<|startoftranscript|><|en|><|transcribe|><|notimestamps|>Hi Hello<|endoftext|>", }, CURRENCY: { text: BASE_TEST_STRINGS.CURRENCY, tokens: ["test", "\u0120$", "1", "\u0120R", "2", "\u0120#", "3", "\u0120\u00e2\u0124\u00ac", "4", "\u0120\u00c2\u00a3", "5", "\u0120\u00c2", "\u00a5", "6", "\u0120\u00e2", "\u0124", "\u00a3", "7", "\u0120\u00e2", "\u0124", "\u00b9", "8", "\u0120\u00e2", "\u0124", "\u00b1", "9", "\u0120", "test"], ids: [50258, 50259, 50359, 50363, 31636, 1848, 16, 497, 17, 3536, 18, 17450, 19, 14378, 20, 1815, 98, 21, 672, 224, 96, 22, 672, 224, 117, 23, 672, 224, 109, 24, 220, 31636, 50257], decoded: "<|startoftranscript|><|en|><|transcribe|><|notimestamps|>test $1 R2 #3 \u20ac4 \u00a35 \u00a56 \u20a37 \u20b98 \u20b19 test<|endoftext|>", }, CURRENCY_WITH_DECIMALS: { text: BASE_TEST_STRINGS.CURRENCY_WITH_DECIMALS, tokens: ["I", "\u0120bought", "\u0120an", "\u0120apple", "\u0120for", "\u0120$", "1", ".", "00", "\u0120at", 
"\u0120", "the", "\u0120store", "."], ids: [50258, 50259, 50359, 50363, 40, 4243, 364, 10606, 337, 1848, 16, 13, 628, 412, 220, 3322, 3531, 13, 50257], decoded: "<|startoftranscript|><|en|><|transcribe|><|notimestamps|>I bought an apple for $1.00 at the store.<|endoftext|>", }, ELLIPSIS: { text: BASE_TEST_STRINGS.ELLIPSIS, tokens: ["you", "\u00e2\u0122\u00a6", "\u0120", "\u0120"], ids: [50258, 50259, 50359, 50363, 5616, 1260, 220, 220, 50257], decoded: "<|startoftranscript|><|en|><|transcribe|><|notimestamps|>you\u2026 <|endoftext|>", }, TEXT_WITH_ESCAPE_CHARACTERS: { text: BASE_TEST_STRINGS.TEXT_WITH_ESCAPE_CHARACTERS, tokens: ["you", "\u00e2\u0122\u00a6", "\u00c2", "\u0142", "\u00c2", "\u0142"], ids: [50258, 50259, 50359, 50363, 5616, 1260, 126, 254, 126, 254, 50257], decoded: "<|startoftranscript|><|en|><|transcribe|><|notimestamps|>you\u2026\u00a0\u00a0<|endoftext|>", }, TEXT_WITH_ESCAPE_CHARACTERS_2: { text: BASE_TEST_STRINGS.TEXT_WITH_ESCAPE_CHARACTERS_2, tokens: ["you", "\u00e2\u0122\u00a6", "\u00c2", "\u0142", "\u00c2", "\u0142", "you", "\u00e2\u0122\u00a6", "\u00c2", "\u0142", "\u00c2", "\u0142"], ids: [50258, 50259, 50359, 50363, 5616, 1260, 126, 254, 126, 254, 5616, 1260, 126, 254, 126, 254, 50257], decoded: "<|startoftranscript|><|en|><|transcribe|><|notimestamps|>you\u2026\u00a0\u00a0you\u2026\u00a0\u00a0<|endoftext|>", }, TILDE_NORMALIZATION: { text: BASE_TEST_STRINGS.TILDE_NORMALIZATION, tokens: ["we", "ird", "\u0120\u00ef", "\u00bd", "\u0140", "\u0120edge", "\u0120\u00ef", "\u00bd", "\u0140", "\u0120case"], ids: [50258, 50259, 50359, 50363, 826, 1271, 25072, 121, 252, 4691, 25072, 121, 252, 1389, 50257], decoded: "<|startoftranscript|><|en|><|transcribe|><|notimestamps|>weird \uff5e edge \uff5e case<|endoftext|>", }, SPIECE_UNDERSCORE: { text: BASE_TEST_STRINGS.SPIECE_UNDERSCORE, tokens: ["\u00e2\u0138", "\u0123", "This", "\u0120\u00e2\u0138", "\u0123", "is", "\u0120\u00e2\u0138", "\u0123", "a", "\u0120\u00e2\u0138", "\u0123", "test", "\u0120\u00e2\u0138", "\u0123", "."], ids: [50258, 50259, 50359, 50363, 39984, 223, 5723, 29405, 223, 271, 29405, 223, 64, 29405, 223, 31636, 29405, 223, 13, 50257], decoded: "<|startoftranscript|><|en|><|transcribe|><|notimestamps|>\u2581This \u2581is \u2581a \u2581test \u2581.<|endoftext|>", }, SPECIAL_TOKENS: { text: WHISPER_TEST_STRINGS.SPECIAL_TOKENS, tokens: ["\u0120", "\u0120", "\u0120", "<|startoftranscript|>", "\u0120", "<|en|>", "\u0120", "\u0120", "\u0120"], ids: [50258, 50259, 50359, 50363, 220, 220, 220, 50258, 220, 50259, 220, 220, 220, 50257], decoded: "<|startoftranscript|><|en|><|transcribe|><|notimestamps|> <|startoftranscript|> <|en|> <|endoftext|>", }, }, "distil-whisper/distil-small.en": { SPECIAL_TOKENS: { text: WHISPER_TEST_STRINGS.SPECIAL_TOKENS, // https://github.com/huggingface/transformers/issues/33371 // tokens: [" <|startoftranscript|> ", "<|en|> "], tokens: ["<|startoftranscript|>", "<|en|>"], ids: [50257, 50362, 50257, 50258, 50256], decoded: "<|startoftranscript|><|notimestamps|><|startoftranscript|><|en|><|endoftext|>", }, }, "Xenova/nb-whisper-tiny-beta": { SIMPLE: { text: BASE_TEST_STRINGS.SIMPLE, tokens: ["\u0120How", "\u0120are", "\u0120you", "\u0120doing", "?"], ids: [50258, 50288, 50359, 50363, 1012, 366, 291, 884, 30, 50257], decoded: "<|startoftranscript|><|no|><|transcribe|><|notimestamps|> How are you doing?<|endoftext|>", }, SIMPLE_WITH_PUNCTUATION: { text: BASE_TEST_STRINGS.SIMPLE_WITH_PUNCTUATION, tokens: ["\u0120You", "\u0120should", "'ve", "\u0120done", "\u0120", "this"], ids: [50258, 50288, 
50359, 50363, 509, 820, 600, 1096, 220, 11176, 50257], decoded: "<|startoftranscript|><|no|><|transcribe|><|notimestamps|> You should've done this<|endoftext|>", }, NUMBERS: { text: BASE_TEST_STRINGS.NUMBERS, tokens: ["\u01200", "12", "3", "45", "67", "89", "\u01200", "\u01201", "\u01202", "\u01203", "\u01204", "\u01205", "\u01206", "\u01207", "\u01208", "\u01209", "\u012010", "\u0120100", "\u01201000"], ids: [50258, 50288, 50359, 50363, 1958, 4762, 18, 8465, 22452, 21115, 1958, 502, 568, 805, 1017, 1025, 1386, 1614, 1649, 1722, 1266, 2319, 9714, 50257], decoded: "<|startoftranscript|><|no|><|transcribe|><|notimestamps|> 0123456789 0 1 2 3 4 5 6 7 8 9 10 100 1000<|endoftext|>", }, TEXT_WITH_NUMBERS: { text: BASE_TEST_STRINGS.TEXT_WITH_NUMBERS, tokens: ["\u0120The", "\u0120company", "\u0120was", "\u0120founded", "\u0120in", "\u01202016", "."], ids: [50258, 50288, 50359, 50363, 440, 2237, 390, 13234, 294, 6549, 13, 50257], decoded: "<|startoftranscript|><|no|><|transcribe|><|notimestamps|> The company was founded in 2016.<|endoftext|>", }, PUNCTUATION: { text: BASE_TEST_STRINGS.PUNCTUATION, tokens: ["\u0120A", "\u010a", "'ll", "\u0120!!", "to", "?'", "d", "''", "d", "\u0120of", ",", "\u0120can", "'t", "."], ids: [50258, 50288, 50359, 50363, 316, 198, 603, 15138, 1353, 8569, 67, 15025, 67, 295, 11, 393, 380, 13, 50257], decoded: "<|startoftranscript|><|no|><|transcribe|><|notimestamps|> A\n'll!!to?'d''d of, can't.<|endoftext|>", }, PYTHON_CODE: { text: BASE_TEST_STRINGS.PYTHON_CODE, tokens: ["\u0120def", "\u0120main", "(", "):", "\u010a", "\u0109", "pass"], ids: [50258, 50288, 50359, 50363, 1060, 2135, 7, 4507, 198, 197, 9216, 50257], decoded: "<|startoftranscript|><|no|><|transcribe|><|notimestamps|> def main():\n\tpass<|endoftext|>", }, JAVASCRIPT_CODE: { text: BASE_TEST_STRINGS.JAVASCRIPT_CODE, tokens: ["\u0120let", "\u0120a", "\u0120=", "\u0120ob", "j", ".", "to", "St", "ring", "(", ");", "\u010a", "to", "St", "ring", "(", ");"], ids: [50258, 50288, 50359, 50363, 718, 257, 6585, 1111, 73, 13, 1353, 4520, 2937, 7, 34446, 198, 1353, 4520, 2937, 7, 34446, 50257], decoded: "<|startoftranscript|><|no|><|transcribe|><|notimestamps|> let a = obj.toString();\ntoString();<|endoftext|>", }, NEWLINES: { text: BASE_TEST_STRINGS.NEWLINES, tokens: ["\u0120This", "\u010a", "\u010a", "is", "\u010a", "a", "\u010a", "test", "."], ids: [50258, 50288, 50359, 50363, 639, 198, 198, 271, 198, 64, 198, 31636, 13, 50257], decoded: "<|startoftranscript|><|no|><|transcribe|><|notimestamps|> This\n\nis\na\ntest.<|endoftext|>", }, BASIC: { text: BASE_TEST_STRINGS.BASIC, tokens: ["\u0120UN", "w", "ant", "\u00c3\u00a9d", ",", "running"], ids: [50258, 50288, 50359, 50363, 8229, 86, 394, 7811, 11, 45482, 50257], decoded: "<|startoftranscript|><|no|><|transcribe|><|notimestamps|> UNwant\u00e9d,running<|endoftext|>", }, CONTROL_TOKENS: { text: BASE_TEST_STRINGS.CONTROL_TOKENS, tokens: ["\u01201", "\u0100", "2", "\u00ef\u00bf\u00bd", "3"], ids: [50258, 50288, 50359, 50363, 502, 188, 17, 5342, 18, 50257], decoded: "<|startoftranscript|><|no|><|transcribe|><|notimestamps|> 1\u00002\ufffd3<|endoftext|>", }, HELLO_WORLD_TITLECASE: { text: BASE_TEST_STRINGS.HELLO_WORLD_TITLECASE, tokens: ["\u0120Hello", "\u0120World"], ids: [50258, 50288, 50359, 50363, 2425, 3937, 50257], decoded: "<|startoftranscript|><|no|><|transcribe|><|notimestamps|> Hello World<|endoftext|>", }, HELLO_WORLD_LOWERCASE: { text: BASE_TEST_STRINGS.HELLO_WORLD_LOWERCASE, tokens: ["\u0120hello", "\u0120world"], ids: [50258, 50288, 50359, 50363, 7751, 1002, 
50257], decoded: "<|startoftranscript|><|no|><|transcribe|><|notimestamps|> hello world<|endoftext|>", }, CHINESE_ONLY: { text: BASE_TEST_STRINGS.CHINESE_ONLY, tokens: ["\u0120", "\u00e7\u0136\u0141\u00e6\u00b4\u00bb", "\u00e7\u013c\u0126", "\u00e7\u013e\u0141", "\u00e8\u00b0", "\u013d", "\u00e6\u013a\u00af"], ids: [50258, 50288, 50359, 50363, 220, 49958, 1546, 6303, 8897, 249, 1541, 50257], decoded: "<|startoftranscript|><|no|><|transcribe|><|notimestamps|> \u751f\u6d3b\u7684\u771f\u8c1b\u662f<|endoftext|>", }, LEADING_SPACE: { text: BASE_TEST_STRINGS.LEADING_SPACE, tokens: ["\u0120", "\u0120", "\u0120leading", "\u0120space"], ids: [50258, 50288, 50359, 50363, 220, 220, 5775, 1901, 50257], decoded: "<|startoftranscript|><|no|><|transcribe|><|notimestamps|> leading space<|endoftext|>", }, TRAILING_SPACE: { text: BASE_TEST_STRINGS.TRAILING_SPACE, tokens: ["\u0120", "tra", "iling", "\u0120space", "\u0120", "\u0120", "\u0120"], ids: [50258, 50288, 50359, 50363, 220, 17227, 4883, 1901, 220, 220, 220, 50257], decoded: "<|startoftranscript|><|no|><|transcribe|><|notimestamps|> trailing space <|endoftext|>", }, DOUBLE_SPACE: { text: BASE_TEST_STRINGS.DOUBLE_SPACE, tokens: ["\u0120Hi", "\u0120", "\u0120Hello"], ids: [50258, 50288, 50359, 50363, 2421, 220, 2425, 50257], decoded: "<|startoftranscript|><|no|><|transcribe|><|notimestamps|> Hi Hello<|endoftext|>", }, CURRENCY: { text: BASE_TEST_STRINGS.CURRENCY, tokens: ["\u0120", "test", "\u0120$", "1", "\u0120R", "2", "\u0120#", "3", "\u0120\u00e2\u0124\u00ac", "4", "\u0120\u00c2\u00a3", "5", "\u0120\u00c2", "\u00a5", "6", "\u0120\u00e2", "\u0124", "\u00a3", "7", "\u0120\u00e2", "\u0124", "\u00b9", "8", "\u0120\u00e2", "\u0124", "\u00b1", "9", "\u0120", "test"], ids: [50258, 50288, 50359, 50363, 220, 31636, 1848, 16, 497, 17, 3536, 18, 17450, 19, 14378, 20, 1815, 98, 21, 672, 224, 96, 22, 672, 224, 117, 23, 672, 224, 109, 24, 220, 31636, 50257], decoded: "<|startoftranscript|><|no|><|transcribe|><|notimestamps|> test $1 R2 #3 \u20ac4 \u00a35 \u00a56 \u20a37 \u20b98 \u20b19 test<|endoftext|>", }, CURRENCY_WITH_DECIMALS: { text: BASE_TEST_STRINGS.CURRENCY_WITH_DECIMALS, tokens: ["\u0120I", "\u0120bought", "\u0120an", "\u0120apple", "\u0120for", "\u0120$", "1", ".", "00", "\u0120at", "\u0120", "the", "\u0120store", "."], ids: [50258, 50288, 50359, 50363, 286, 4243, 364, 10606, 337, 1848, 16, 13, 628, 412, 220, 3322, 3531, 13, 50257], decoded: "<|startoftranscript|><|no|><|transcribe|><|notimestamps|> I bought an apple for $1.00 at the store.<|endoftext|>", }, ELLIPSIS: { text: BASE_TEST_STRINGS.ELLIPSIS, tokens: ["\u0120you", "\u00e2\u0122\u00a6", "\u0120", "\u0120"], ids: [50258, 50288, 50359, 50363, 291, 1260, 220, 220, 50257], decoded: "<|startoftranscript|><|no|><|transcribe|><|notimestamps|> you\u2026 <|endoftext|>", }, TEXT_WITH_ESCAPE_CHARACTERS: { text: BASE_TEST_STRINGS.TEXT_WITH_ESCAPE_CHARACTERS, tokens: ["\u0120you", "\u00e2\u0122\u00a6", "\u00c2", "\u0142", "\u00c2", "\u0142"], ids: [50258, 50288, 50359, 50363, 291, 1260, 126, 254, 126, 254, 50257], decoded: "<|startoftranscript|><|no|><|transcribe|><|notimestamps|> you\u2026\u00a0\u00a0<|endoftext|>", }, TEXT_WITH_ESCAPE_CHARACTERS_2: { text: BASE_TEST_STRINGS.TEXT_WITH_ESCAPE_CHARACTERS_2, tokens: ["\u0120you", "\u00e2\u0122\u00a6", "\u00c2", "\u0142", "\u00c2", "\u0142", "you", "\u00e2\u0122\u00a6", "\u00c2", "\u0142", "\u00c2", "\u0142"], ids: [50258, 50288, 50359, 50363, 291, 1260, 126, 254, 126, 254, 5616, 1260, 126, 254, 126, 254, 50257], decoded: 
"<|startoftranscript|><|no|><|transcribe|><|notimestamps|> you\u2026\u00a0\u00a0you\u2026\u00a0\u00a0<|endoftext|>", }, TILDE_NORMALIZATION: { text: BASE_TEST_STRINGS.TILDE_NORMALIZATION, tokens: ["\u0120weird", "\u0120\u00ef", "\u00bd", "\u0140", "\u0120edge", "\u0120\u00ef", "\u00bd", "\u0140", "\u0120case"], ids: [50258, 50288, 50359, 50363, 3657, 25072, 121, 252, 4691, 25072, 121, 252, 1389, 50257], decoded: "<|startoftranscript|><|no|><|transcribe|><|notimestamps|> weird \uff5e edge \uff5e case<|endoftext|>", }, SPIECE_UNDERSCORE: { text: BASE_TEST_STRINGS.SPIECE_UNDERSCORE, tokens: ["\u0120\u00e2\u0138", "\u0123", "This", "\u0120\u00e2\u0138", "\u0123", "is", "\u0120\u00e2\u0138", "\u0123", "a", "\u0120\u00e2\u0138", "\u0123", "test", "\u0120\u00e2\u0138", "\u0123", "."], ids: [50258, 50288, 50359, 50363, 29405, 223, 5723, 29405, 223, 271, 29405, 223, 64, 29405, 223, 31636, 29405, 223, 13, 50257], decoded: "<|startoftranscript|><|no|><|transcribe|><|notimestamps|> \u2581This \u2581is \u2581a \u2581test \u2581.<|endoftext|>", }, SPECIAL_TOKENS: { text: WHISPER_TEST_STRINGS.SPECIAL_TOKENS, tokens: ["\u0120", "\u0120", "\u0120", "<|startoftranscript|>", "\u0120", "<|en|>", "\u0120", "\u0120", "\u0120"], ids: [50258, 50288, 50359, 50363, 220, 220, 220, 50258, 220, 50259, 220, 220, 220, 50257], decoded: "<|startoftranscript|><|no|><|transcribe|><|notimestamps|> <|startoftranscript|> <|en|> <|endoftext|>", }, }, }; const MAX_EXECUTION_TIME = 10_000; export const CUSTOM_TESTS = () => { describe("Decode ASR", () => { it( "should decode ASR outputs", async () => { const tokenizer = await WhisperTokenizer.from_pretrained("onnx-community/whisper-tiny.en_timestamped"); const model_outputs = [ { stride: [30, 0, 5], tokens: [50257n, 50362n, 8410n, 7283n, 0n, 2329n, 8410n, 7283n, 0n, 2094n, 470n, 1309n, 534n, 10625n, 307n, 10625n, 13n, 34668n, 345n, 531n, 9439n, 11n, 523n, 655n, 8410n, 7283n, 0n, 39134n, 16592n, 10625n, 0n, 9440n, 36n, 26751n, 0n, 25848n, 8410n, 7283n, 0n, 2773n, 661n, 4320n, 1943n, 981n, 345n, 821n, 8066n, 7765n, 510n, 290n, 670n, 1327n, 379n, 340n, 13n, 10528n, 318n, 5340n, 0n, 50256n], token_timestamps: [0, 0, 0, 3.78, 4.22, 5.34, 6.04, 6.56, 7, 7.92, 8.58, 8.58, 8.88, 9.14, 9.54, 9.94, 10.58, 11.38, 11.88, 12.42, 12.62, 13, 13.36, 13.64, 14.26, 14.76, 15.12, 15.4, 15.74, 16.12, 16.66, 17.14, 17.24, 17.24, 17.72, 18.38, 18.6, 19.38, 19.92, 22.66, 22.9, 23.24, 23.5, 24.14, 24.56, 24.7, 24.72, 24.94, 25.18, 25.54, 25.72, 26.02, 26.34, 26.44, 26.84, 27.04, 27.16, 27.54, 28.06, 29.92], }, { stride: [30, 5, 5], tokens: [50257n, 50362n, 2773n, 661n, 4320n, 1943n, 981n, 345n, 821n, 8066n, 7765n, 510n, 290n, 670n, 1327n, 379n, 340n, 13n, 10528n, 318n, 5340n, 13n, 921n, 815n, 651n, 284n, 262n, 966n, 810n, 2687n, 2073n, 561n, 11238n, 290n, 345n, 821n, 407n, 8066n, 2245n, 612n, 13n, 1400n, 11n, 644n, 389n, 345n, 4953n, 329n, 30n, 2141n, 340n, 0n, 2329n, 466n, 340n, 0n, 3363n, 345n, 460n, 0n, 2329n, 466n, 340n, 0n, 50256n], token_timestamps: [0, 0, 0, 2.92, 3.24, 3.48, 4.14, 4.56, 4.7, 4.74, 4.92, 5.18, 5.54, 5.72, 6.04, 6.34, 6.46, 6.84, 7.04, 7.16, 7.54, 8.12, 10.16, 10.7, 10.9, 11.12, 11.24, 11.48, 11.84, 12.44, 12.82, 13.2, 13.46, 13.72, 14.06, 14.28, 14.34, 14.56, 14.8, 15.16, 15.9, 16.42, 16.82, 16.86, 17.02, 17.1, 17.22, 17.56, 18.06, 19.28, 19.62, 20.26, 21.96, 22.64, 24.28, 24.76, 25.18, 25.56, 25.78, 26.28, 27.12, 27.54, 27.82, 28.22, 29.48], }, { stride: [23.7728125, 5, 0], tokens: [50257n, 50362n, 2329n, 466n, 340n, 0n, 3363n, 345n, 460n, 0n, 2329n, 466n, 340n, 0n, 1002n, 345n, 
821n, 10032n, 286n, 3599n, 625n, 11n, 2245n, 3501n, 510n, 13n, 50256n], token_timestamps: [0, 0, 0, 2.44, 4.3, 5.04, 5.06, 5.56, 5.8, 6.32, 7.12, 7.56, 7.8, 8.9, 10.92, 12.96, 13.28, 13.28, 13.44, 13.72, 13.96, 14.84, 15.5, 16.06, 16.86, 17.88, 20.92], }, ]; const target = [ " DO IT! Just DO IT! Don't let your dreams be dreams. Yesterday you said tomorrow, so just DO IT! MAKE YOUR dreams! COME TRUE! JUST DO IT! Some people dream success while you're gonna wake up and work hard at it. Nothing is impossible. You should get to the point where anyone else would quit and you're not gonna stop there. No, what are you waiting for? Do it! Just do it! Yes you can! Just do it! If you're tired of starting over, stop giving up.", { chunks: [ { text: " DO", timestamp: [0.0, 3.78] }, { text: " IT!", timestamp: [3.78, 4.24 /* 5.34 */] }, { text: " Just", timestamp: [5.34, 6.04] }, { text: " DO", timestamp: [6.04, 6.56] }, { text: " IT!", timestamp: [6.56, 7.02 /* 7.92 */] }, { text: " Don't", timestamp: [7.92, 8.58] }, { text: " let", timestamp: [8.58, 8.88] }, { text: " your", timestamp: [8.88, 9.14] }, { text: " dreams", timestamp: [9.14, 9.54] }, { text: " be", timestamp: [9.54, 9.94] }, { text: " dreams.", timestamp: [9.94, 10.6 /* 11.38 */] }, { text: " Yesterday", timestamp: [11.38, 11.88] }, { text: " you", timestamp: [11.88, 12.42] }, { text: " said", timestamp: [12.42, 12.62] }, { text: " tomorrow,", timestamp: [12.62, 13.02 /* 13.36 */] }, { text: " so", timestamp: [13.36, 13.64] }, { text: " just", timestamp: [13.64, 14.26] }, { text: " DO", timestamp: [14.26, 14.76] }, { text: " IT!", timestamp: [14.76, 15.14 /* 15.4 */] }, { text: " MAKE", timestamp: [15.4, 15.74] }, { text: " YOUR", timestamp: [15.74, 16.12] }, { text: " dreams!", timestamp: [16.12, 16.68 /* 17.14 */] }, { text: " COME", timestamp: [17.14, 17.24] }, { text: " TRUE!", timestamp: [17.24, 17.74 /* 18.38 */] }, { text: " JUST", timestamp: [18.38, 18.6] }, { text: " DO", timestamp: [18.6, 19.38] }, { text: " IT!", timestamp: [19.38, 19.94 /* 22.66 */] }, { text: " Some", timestamp: [22.66, 22.9] }, { text: " people", timestamp: [22.9, 23.24] }, { text: " dream", timestamp: [23.24, 23.5] }, { text: " success", timestamp: [23.5, 24.14] }, { text: " while", timestamp: [24.14, 24.56] }, { text: " you're", timestamp: [24.56, 24.72] }, { text: " gonna", timestamp: [24.72, 24.94] }, { text: " wake", timestamp: [24.94, 25.18] }, { text: " up", timestamp: [25.18, 25.54] }, { text: " and", timestamp: [25.54, 25.72] }, { text: " work", timestamp: [25.72, 26.04] }, { text: " hard", timestamp: [26.04, 26.34] }, { text: " at", timestamp: [26.34, 26.46] }, { text: " it.", timestamp: [26.46, 26.86 /* 27.04 */] }, { text: " Nothing", timestamp: [27.04, 27.16] }, { text: " is", timestamp: [27.16, 27.54] }, { text: " impossible.", timestamp: [27.54, 28.14 /* 30.16 */] }, { text: " You", timestamp: [30.16, 30.7] }, { text: " should", timestamp: [30.7, 30.9] }, { text: " get", timestamp: [30.9, 31.12] }, { text: " to", timestamp: [31.12, 31.24] }, { text: " the", timestamp: [31.24, 31.48] }, { text: " point", timestamp: [31.48, 31.84] }, { text: " where", timestamp: [31.84, 32.44] }, { text: " anyone", timestamp: [32.44, 32.82] }, { text: " else", timestamp: [32.82, 33.2] }, { text: " would", timestamp: [33.2, 33.46] }, { text: " quit", timestamp: [33.46, 33.72] }, { text: " and", timestamp: [33.72, 34.06] }, { text: " you're", timestamp: [34.06, 34.34] }, { text: " not", timestamp: [34.34, 34.56] }, { text: " gonna", timestamp: [34.56, 34.8] }, { 
text: " stop", timestamp: [34.8, 35.16] }, { text: " there.", timestamp: [35.16, 35.92 /* 36.42 */] }, { text: " No,", timestamp: [36.42, 36.84 /* 36.86 */] }, { text: " what", timestamp: [36.86, 37.02] }, { text: " are", timestamp: [37.02, 37.1] }, { text: " you", timestamp: [37.1, 37.22] }, { text: " waiting", timestamp: [37.22, 37.56] }, { text: " for?", timestamp: [37.56, 38.08 /* 39.28 */] }, { text: " Do", timestamp: [39.28, 39.62] }, { text: " it!", timestamp: [39.62, 40.28 /* 41.96 */] }, { text: " Just", timestamp: [41.96, 42.64] }, { text: " do", timestamp: [42.64, 44.28] }, { text: " it!", timestamp: [44.28, 44.78 /* 45.18 */] }, { text: " Yes", timestamp: [45.18, 45.56] }, { text: " you", timestamp: [45.56, 45.78] }, { text: " can!", timestamp: [45.8, 46.34 /* 47.12 */] }, { text: " Just", timestamp: [47.12, 47.56] }, { text: " do", timestamp: [47.56, 47.8] }, { text: " it!", timestamp: [47.8, 48.92 /* 50.92 */] }, { text: " If", timestamp: [50.92, 52.96] }, { text: " you're", timestamp: [52.96, 53.28] }, { text: " tired", timestamp: [53.28, 53.44] }, { text: " of", timestamp: [53.44, 53.72] }, { text: " starting", timestamp: [53.72, 53.96] }, { text: " over,", timestamp: [53.96, 54.86 /* 55.5 */] }, { text: " stop", timestamp: [55.5, 56.06] }, { text: " giving", timestamp: [56.06, 56.86] }, { text: " up.", timestamp: [56.86, 57.9 /* 60.92 */] }, ], }, ]; compare( tokenizer._decode_asr(model_outputs, { return_timestamps: "word", time_precision: 0.02, force_full_sequences: false, }), target, 1e-2, ); }, MAX_EXECUTION_TIME, ); it( "should handle overlapping edge case", async () => { const tokenizer = await WhisperTokenizer.from_pretrained("onnx-community/whisper-tiny.en_timestamped"); const model_outputs = [ { stride: [30, 0, 5], tokens: [50257n, 50362n, 8410n, 7283n, 0n, 2329n, 8410n, 7283n, 0n, 2094n, 470n, 1309n, 534n, 10625n, 307n, 10625n, 13n, 34668n, 11n, 345n, 531n, 9439n, 11n, 523n, 655n, 8410n, 7283n, 0n, 39134n, 16592n, 10560n, 3955n, 50n, 0n, 7102n, 5446n, 46n, 0n, 25848n, 8410n, 7283n, 0n, 2773n, 661n, 4320n, 1943n, 981n, 345n, 821n, 8066n, 7765n, 510n, 290n, 670n, 1327n, 379n, 340n, 13n, 10528n, 318n, 5340n, 13n, 50256n], token_timestamps: [0, 0, 0, 3.78, 4.22, 5.26, 6.04, 6.54, 7, 7.94, 8.58, 8.58, 8.88, 9.16, 9.54, 9.94, 10.6, 11.38, 11.88, 12.38, 12.44, 12.62, 13, 13.36, 13.64, 14.24, 14.74, 15.12, 15.4, 15.74, 16.1, 16.54, 16.54, 16.78, 17.08, 17.2, 17.36, 17.56, 18.08, 18.58, 19.38, 19.88, 22.54, 22.9, 23.24, 23.5, 24.14, 24.56, 24.7, 24.94, 24.94, 25.18, 25.54, 25.72, 26.04, 26.34, 26.46, 26.84, 27.04, 27.14, 27.54, 28.06, 29.92], }, { stride: [30, 5, 5], tokens: [50257n, 50362n, 2773n, 661n, 4320n, 1943n, 981n, 345n, 821n, 8066n, 7765n, 510n, 290n, 670n, 1327n, 379n, 340n, 13n, 10528n, 318n, 5340n, 13n, 921n, 815n, 651n, 284n, 262n, 966n, 810n, 2687n, 2073n, 561n, 11238n, 290n, 345n, 821n, 407n, 8066n, 2245n, 612n, 13n, 1400n, 11n, 644n, 389n, 345n, 4953n, 329n, 30n, 2141n, 340n, 0n, 2329n, 466n, 340n, 0n, 3363n, 11n, 345n, 460n, 0n, 2329n, 466n, 340n, 0n, 50256n], token_timestamps: [0, 0, 0, 2.92, 3.24, 3.5, 4.14, 4.56, 4.7, 4.74, 4.92, 5.18, 5.54, 5.74, 6.04, 6.34, 6.46, 6.84, 7.04, 7.18, 7.56, 8.12, 9.68, 10.7, 10.88, 11.1, 11.24, 11.48, 11.82, 12.46, 12.82, 13.2, 13.46, 13.72, 14.08, 14.28, 14.34, 14.56, 14.82, 15.16, 15.72, 16.42, 16.82, 16.86, 17, 17.1, 17.2, 17.56, 18.06, 19.28, 19.6, 20.28, 21.96, 22.64, 24.28, 24.76, 25.18, 25.56, 25.56, 25.84, 26.36, 27.12, 27.54, 27.82, 28.16, 29.48], }, { stride: [23.7728125, 5, 0], tokens: [50257n, 50362n, 2329n, 
466n, 340n, 0n, 3363n, 345n, 460n, 0n, 2329n, 466n, 340n, 0n, 1002n, 534n, 15867n, 318n, 3599n, 625n, 11n, 2245n, 3501n, 510n, 13n, 50256n], token_timestamps: [0, 0, 0, 2.44, 4.3, 5.04, 5.06, 5.56, 5.8, 6.32, 7.12, 7.56, 7.8, 8.72, 10.04, 12.96, 13.3, 13.44, 13.72, 13.98, 14.86, 15.5, 16, 16.88, 17.76, 20.9], }, ]; const target = [ " DO IT! Just DO IT! Don't let your dreams be dreams. Yesterday, you said tomorrow, so just DO IT! MAKE YOUR DRIMS! CONTRO! JUST DO IT! Some people dream success while you're gonna wake up and work hard at it. Nothing is impossible. You should get to the point where anyone else would quit and you're not gonna stop there. No, what are you waiting for? Do it! Just do it! Yes, you can! Just do it! If your tire is starting over, stop giving up.", { chunks: [ { text: " DO", timestamp: [0, 3.78] }, { text: " IT!", timestamp: [3.78, 4.24] }, { text: " Just", timestamp: [5.26, 6.04] }, { text: " DO", timestamp: [6.04, 6.54] }, { text: " IT!", timestamp: [6.54, 7.02] }, { text: " Don't", timestamp: [7.94, 8.58] }, { text: " let", timestamp: [8.58, 8.88] }, { text: " your", timestamp: [8.88, 9.16] }, { text: " dreams", timestamp: [9.16, 9.54] }, { text: " be", timestamp: [9.54, 9.94] }, { text: " dreams.", timestamp: [9.94, 10.62] }, { text: " Yesterday,", timestamp: [11.38, 11.9] }, { text: " you", timestamp: [12.38, 12.44] }, { text: " said", timestamp: [12.44, 12.62] }, { text: " tomorrow,", timestamp: [12.62, 13.02] }, { text: " so", timestamp: [13.36, 13.64] }, { text: " just", timestamp: [13.64, 14.24] }, { text: " DO", timestamp: [14.24, 14.74] }, { text: " IT!", timestamp: [14.74, 15.14] }, { text: " MAKE", timestamp: [15.4, 15.74] }, { text: " YOUR", timestamp: [15.74, 16.1] }, { text: " DRIMS!", timestamp: [16.1, 16.8] }, { text: " CONTRO!", timestamp: [17.08, 17.58] }, { text: " JUST", timestamp: [18.08, 18.58] }, { text: " DO", timestamp: [18.58, 19.38] }, { text: " IT!", timestamp: [19.38, 19.9] }, { text: " Some", timestamp: [22.54, 22.9] }, { text: " people", timestamp: [22.9, 23.24] }, { text: " dream", timestamp: [23.24, 23.5] }, { text: " success", timestamp: [23.5, 24.14] }, { text: " while", timestamp: [24.14, 24.56] }, { text: " you're", timestamp: [24.56, 24.94] }, { text: " gonna", timestamp: [24.94, 24.94] }, { text: " wake", timestamp: [24.94, 25.18] }, { text: " up", timestamp: [25.18, 25.54] }, { text: " and", timestamp: [25.54, 25.74] }, { text: " work", timestamp: [25.74, 26.04] }, { text: " hard", timestamp: [26.04, 26.34] }, { text: " at", timestamp: [26.34, 26.46] }, { text: " it.", timestamp: [26.46, 26.86] }, { text: " Nothing", timestamp: [27.04, 27.18] }, { text: " is", timestamp: [27.18, 27.56] }, { text: " impossible.", timestamp: [27.56, 28.14] }, { text: " You", timestamp: [29.68, 30.7] }, { text: " should", timestamp: [30.7, 30.88] }, { text: " get", timestamp: [30.88, 31.1] }, { text: " to", timestamp: [31.1, 31.24] }, { text: " the", timestamp: [31.24, 31.48] }, { text: " point", timestamp: [31.48, 31.82] }, { text: " where", timestamp: [31.82, 32.46] }, { text: " anyone", timestamp: [32.46, 32.82] }, { text: " else", timestamp: [32.82, 33.2] }, { text: " would", timestamp: [33.2, 33.46] }, { text: " quit", timestamp: [33.46, 33.72] }, { text: " and", timestamp: [33.72, 34.08] }, { text: " you're", timestamp: [34.08, 34.34] }, { text: " not", timestamp: [34.34, 34.56] }, { text: " gonna", timestamp: [34.56, 34.82] }, { text: " stop", timestamp: [34.82, 35.16] }, { text: " there.", timestamp: [35.16, 35.74] }, { text: " No,", 
timestamp: [36.42, 36.84] }, { text: " what", timestamp: [36.86, 37] }, { text: " are", timestamp: [37, 37.1] }, { text: " you", timestamp: [37.1, 37.2] }, { text: " waiting", timestamp: [37.2, 37.56] }, { text: " for?", timestamp: [37.56, 38.08] }, { text: " Do", timestamp: [39.28, 39.6] }, { text: " it!", timestamp: [39.6, 40.3] }, { text: " Just", timestamp: [41.96, 42.64] }, { text: " do", timestamp: [42.64, 44.28] }, { text: " it!", timestamp: [44.28, 44.78] }, { text: " Yes,", timestamp: [45.18, 45.56] }, { text: " you", timestamp: [45.56, 45.84] }, { text: " can!", timestamp: [45.8, 46.34] }, { text: " Just", timestamp: [47.12, 47.56] }, { text: " do", timestamp: [47.56, 47.8] }, { text: " it!", timestamp: [47.8, 48.74] }, { text: " If", timestamp: [50.04, 52.96] }, { text: " your", timestamp: [52.96, 53.3] }, { text: " tire", timestamp: [53.3, 53.44] }, { text: " is", timestamp: [53.44, 53.72] }, { text: " starting", timestamp: [53.72, 53.98] }, { text: " over,", timestamp: [53.98, 54.88] }, { text: " stop", timestamp: [55.5, 56] }, { text: " giving", timestamp: [56, 56.88] }, { text: " up.", timestamp: [56.88, 57.78] }, ], }, ]; compare( tokenizer._decode_asr(model_outputs, { return_timestamps: "word", time_precision: 0.02, force_full_sequences: false, }), target, 1e-2, ); }, MAX_EXECUTION_TIME, ); }); };
transformers.js/tests/models/whisper/test_tokenization_whisper.js/0
{ "file_path": "transformers.js/tests/models/whisper/test_tokenization_whisper.js", "repo_id": "transformers.js", "token_count": 25252 }
import { pipeline, QuestionAnsweringPipeline } from "../../src/transformers.js"; import { MAX_MODEL_LOAD_TIME, MAX_TEST_EXECUTION_TIME, MAX_MODEL_DISPOSE_TIME, DEFAULT_MODEL_OPTIONS } from "../init.js"; const PIPELINE_ID = "question-answering"; export default () => { describe("Question Answering", () => { const model_id = "hf-internal-testing/tiny-random-BertForQuestionAnswering"; /** @type {QuestionAnsweringPipeline} */ let pipe; beforeAll(async () => { pipe = await pipeline(PIPELINE_ID, model_id, DEFAULT_MODEL_OPTIONS); }, MAX_MODEL_LOAD_TIME); it("should be an instance of QuestionAnsweringPipeline", () => { expect(pipe).toBeInstanceOf(QuestionAnsweringPipeline); }); describe("batch_size=1", () => { it( "default (top_k=1)", async () => { const output = await pipe("a", "b c"); const target = { score: 0.11395696550607681, /* start: 0, end: 1, */ answer: "b" }; expect(output).toBeCloseToNested(target, 5); }, MAX_TEST_EXECUTION_TIME, ); it( "custom (top_k=3)", async () => { const output = await pipe("a", "b c", { top_k: 3 }); const target = [ { score: 0.11395696550607681, /* start: 0, end: 1, */ answer: "b" }, { score: 0.11300431191921234, /* start: 2, end: 3, */ answer: "c" }, { score: 0.10732574015855789, /* start: 0, end: 3, */ answer: "b c" }, ]; expect(output).toBeCloseToNested(target, 5); }, MAX_TEST_EXECUTION_TIME, ); }); afterAll(async () => { await pipe.dispose(); }, MAX_MODEL_DISPOSE_TIME); }); };
transformers.js/tests/pipelines/test_pipelines_question_answering.js/0
{ "file_path": "transformers.js/tests/pipelines/test_pipelines_question_answering.js", "repo_id": "transformers.js", "token_count": 775 }
import { // Models AutoModelForSeq2SeqLM, AutoModelForCausalLM, LlamaForCausalLM, LlavaForConditionalGeneration, // Tokenizers AutoTokenizer, LlamaTokenizer, // Processors AutoProcessor, Processor, // Other TextStreamer, RawImage, } from "../../src/transformers.js"; import { init, MAX_TEST_EXECUTION_TIME, MAX_MODEL_LOAD_TIME, MAX_MODEL_DISPOSE_TIME, DEFAULT_MODEL_OPTIONS } from "../init.js"; // Initialise the testing environment init(); // Helper function to generate text const generate = async (model, tokenizer, text, options) => { const inputs = tokenizer(text); return await model.generate({ ...inputs, ...options, }); }; describe("Generation parameters", () => { // List all models which will be tested const models = [ "hf-internal-testing/tiny-random-T5ForConditionalGeneration", // encoder-decoder "hf-internal-testing/tiny-random-LlamaForCausalLM", // decoder-only ]; const DUMMY_TEXT = "hello"; describe(`encoder-decoder (${models[0]})`, () => { const model_id = models[0]; let model; let tokenizer; beforeAll(async () => { model = await AutoModelForSeq2SeqLM.from_pretrained(model_id, DEFAULT_MODEL_OPTIONS); tokenizer = await AutoTokenizer.from_pretrained(model_id); }, MAX_MODEL_LOAD_TIME); // NOTE: Since `max_length` defaults to 20, this case also tests that. it( "default", async () => { const outputs = await generate(model, tokenizer, DUMMY_TEXT, {}); expect(outputs.dims.at(-1)).toEqual(20); }, MAX_TEST_EXECUTION_TIME, ); it( "max_new_tokens", async () => { const MAX_NEW_TOKENS = 5; const outputs = await generate(model, tokenizer, DUMMY_TEXT, { max_new_tokens: MAX_NEW_TOKENS, }); expect(outputs.dims.at(-1)).toEqual(MAX_NEW_TOKENS + 1); // + 1 due to forced BOS token }, MAX_TEST_EXECUTION_TIME, ); it( "min_length", async () => { const MIN_LENGTH = 3; const MAX_LENGTH = 5; const outputs = await generate(model, tokenizer, DUMMY_TEXT, { eos_token_id: 0, min_length: MIN_LENGTH, max_length: MAX_LENGTH, }); expect(outputs.tolist()).toEqual([[0n, 11924n, 11924n, 11924n, 11924n]]); expect(outputs.dims.at(-1)).toBeGreaterThanOrEqual(MIN_LENGTH); }, MAX_TEST_EXECUTION_TIME, ); it( "min_new_tokens", async () => { const MIN_NEW_TOKENS = 2; const MAX_LENGTH = 5; const outputs = await generate(model, tokenizer, DUMMY_TEXT, { eos_token_id: 0, min_new_tokens: MIN_NEW_TOKENS, max_length: MAX_LENGTH, }); expect(outputs.tolist()).toEqual([[0n, 11924n, 11924n, 11924n, 11924n]]); expect(outputs.dims.at(-1)).toBeGreaterThanOrEqual(MIN_NEW_TOKENS); }, MAX_TEST_EXECUTION_TIME, ); afterAll(async () => { await model?.dispose(); }, MAX_MODEL_DISPOSE_TIME); }); describe(`decoder-only (${models[1]})`, () => { const model_id = models[1]; let model; let tokenizer; beforeAll(async () => { model = await AutoModelForCausalLM.from_pretrained(model_id, DEFAULT_MODEL_OPTIONS); tokenizer = await AutoTokenizer.from_pretrained(model_id); }, MAX_MODEL_LOAD_TIME); // NOTE: Since `max_length` defaults to 20, this case also tests that. 
it( "default", async () => { const outputs = await generate(model, tokenizer, DUMMY_TEXT, {}); expect(outputs.dims.at(-1)).toEqual(20); }, MAX_TEST_EXECUTION_TIME, ); it( "max_new_tokens", async () => { const MAX_NEW_TOKENS = 5; const PROMPT_LENGTH = 2; // BOS + DUMMY_TEXT const outputs = await generate(model, tokenizer, DUMMY_TEXT, { max_new_tokens: MAX_NEW_TOKENS, }); const expected_length = PROMPT_LENGTH + MAX_NEW_TOKENS; expect(outputs.dims.at(-1)).toEqual(expected_length); }, MAX_TEST_EXECUTION_TIME, ); it( "min_length", async () => { const MIN_LENGTH = 4; const outputs = await generate(model, tokenizer, DUMMY_TEXT, { eos_token_id: [ 18547, // min_length will suppress this token (generated by default) 16012, // stop at this token ], min_length: MIN_LENGTH, }); expect(outputs.tolist()).toEqual([[1n, 22172n, 31583n, 18824n, 16621n, 8136n, 16012n]]); expect(outputs.dims.at(-1)).toBeGreaterThanOrEqual(MIN_LENGTH); }, MAX_TEST_EXECUTION_TIME, ); it( "min_new_tokens", async () => { const MIN_NEW_TOKENS = 2; const outputs = await generate(model, tokenizer, DUMMY_TEXT, { eos_token_id: [ 18547, // min_new_tokens will suppress this token (generated by default) 16012, // stop at this token ], min_new_tokens: MIN_NEW_TOKENS, }); expect(outputs.tolist()).toEqual([[1n, 22172n, 31583n, 18824n, 16621n, 8136n, 16012n]]); expect(outputs.dims.at(-1)).toBeGreaterThanOrEqual(MIN_NEW_TOKENS); }, MAX_TEST_EXECUTION_TIME, ); afterAll(async () => { await model?.dispose(); }, MAX_MODEL_DISPOSE_TIME); }); }); describe("Streamers", () => { describe("decoder-only", () => { const model_id = "hf-internal-testing/tiny-random-LlamaForCausalLM"; let model, tokenizer; beforeAll(async () => { model = await AutoModelForCausalLM.from_pretrained(model_id, DEFAULT_MODEL_OPTIONS); tokenizer = await AutoTokenizer.from_pretrained(model_id); }, MAX_MODEL_LOAD_TIME); it( "batch_size=1", async () => { const target_chunks = ["helloerdingsdelete ", "melytabular ", "Stadiumoba ", "alcune ", "drug"]; const chunks = []; const callback_function = (text) => { chunks.push(text); }; const streamer = new TextStreamer(tokenizer, { callback_function, skip_special_tokens: true }); const inputs = tokenizer("hello"); const outputs = await model.generate({ ...inputs, max_length: 10, streamer, }); expect(outputs.tolist()).toEqual([[1n, 22172n, 18547n, 8143n, 22202n, 9456n, 17213n, 15330n, 26591n, 15721n]]); expect(chunks).toEqual(target_chunks); }, MAX_TEST_EXECUTION_TIME, ); afterAll(async () => { await model?.dispose(); }, MAX_MODEL_DISPOSE_TIME); }); }); describe("PKV caching", () => { describe("LlamaForCausalLM", () => { const model_id = "hf-internal-testing/tiny-random-LlamaForCausalLM"; /** @type {LlamaForCausalLM} */ let model; /** @type {LlamaTokenizer} */ let tokenizer; beforeAll(async () => { model = await LlamaForCausalLM.from_pretrained(model_id, DEFAULT_MODEL_OPTIONS); tokenizer = await LlamaTokenizer.from_pretrained(model_id); }, MAX_MODEL_LOAD_TIME); it( "batch_size=1", async () => { const inputs = tokenizer("1"); // Generate first sequence w/o PKV // NOTE: `return_dict_in_generate=true` is required to get PKV const { past_key_values, sequences } = await model.generate({ ...inputs, max_new_tokens: 5, do_sample: false, return_dict_in_generate: true, }); // Update output with new text const decoded = tokenizer.batch_decode(sequences, { skip_special_tokens: false, })[0]; const new_inputs = tokenizer(decoded + "2", { add_special_tokens: false, }); // Run w/o PKV const generated_ids = await model.generate({ ...new_inputs, 
max_new_tokens: 3, do_sample: false, }); // Run w/ PKV const generated_ids_pkv = await model.generate({ ...new_inputs, past_key_values, max_new_tokens: 3, do_sample: false, }); const target = [[1n, 259n, 29896n, 24959n, 22063n, 17192n, 12189n, 22468n, 29906n, 3399n, 24823n, 26470n]]; expect(generated_ids.tolist()).toEqual(target); expect(generated_ids_pkv.tolist()).toEqual(target); }, MAX_TEST_EXECUTION_TIME, ); afterAll(async () => { await model?.dispose(); }, MAX_MODEL_DISPOSE_TIME); }); describe("LlavaForConditionalGeneration", () => { const model_id = "Xenova/tiny-random-LlavaForConditionalGeneration"; /** @type {LlavaForConditionalGeneration} */ let model; /** @type {PreTrainedTokenizer} */ let tokenizer; /** @type {Processor} */ let processor; beforeAll(async () => { model = await LlavaForConditionalGeneration.from_pretrained(model_id, DEFAULT_MODEL_OPTIONS); tokenizer = await AutoTokenizer.from_pretrained(model_id); processor = await AutoProcessor.from_pretrained(model_id); }, MAX_MODEL_LOAD_TIME); it( "batch_size=1", async () => { const text_inputs = tokenizer("<image>hello"); // Empty white image const dims = [224, 224, 3]; const image = new RawImage(new Uint8ClampedArray(dims[0] * dims[1] * dims[2]).fill(255), ...dims); const vision_inputs = await processor(image); // Generate first sequence w/o PKV // NOTE: `return_dict_in_generate=true` is required to get PKV const { past_key_values, sequences } = await model.generate({ ...text_inputs, ...vision_inputs, max_new_tokens: 5, do_sample: false, return_dict_in_generate: true, }); // Update output with new text const decoded = tokenizer.batch_decode(sequences).map((x) => x + "new"); const new_inputs = tokenizer(decoded, { add_special_tokens: false, }); // Run w/o PKV const generated_ids = await model.generate({ ...new_inputs, ...vision_inputs, max_new_tokens: 3, do_sample: false, }); // Run w/ PKV const generated_ids_pkv = await model.generate({ ...new_inputs, past_key_values, max_new_tokens: 3, do_sample: false, }); const target = [[1n, 32000n, 29871n, 23927n, 359n, 1519n, 568n, 5769n, 1330n, 21544n, 11568n, 1482n, 7258n, 1250n, 16117n]]; expect(generated_ids.tolist()).toEqual(target); expect(generated_ids_pkv.tolist()).toEqual(target); }, MAX_TEST_EXECUTION_TIME, ); afterAll(async () => { await model?.dispose(); }, MAX_MODEL_DISPOSE_TIME); }); });
transformers.js/tests/utils/generation.test.js/0
{ "file_path": "transformers.js/tests/utils/generation.test.js", "repo_id": "transformers.js", "token_count": 4999 }
FROM nvidia/cuda:10.2-cudnn7-devel-ubuntu18.04 LABEL maintainer="Hugging Face" LABEL repository="transformers" RUN apt update && \ apt install -y bash \ build-essential \ git \ curl \ ca-certificates \ python3 \ python3-pip && \ rm -rf /var/lib/apt/lists RUN python3 -m pip install --no-cache-dir --upgrade pip && \ python3 -m pip install --no-cache-dir \ jupyter \ tensorflow \ torch RUN git clone https://github.com/NVIDIA/apex RUN cd apex && \ python3 setup.py install && \ pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./ WORKDIR /workspace COPY . transformers/ RUN cd transformers/ && \ python3 -m pip install --no-cache-dir . CMD ["/bin/bash"]
transformers/docker/transformers-gpu/Dockerfile/0
{ "file_path": "transformers/docker/transformers-gpu/Dockerfile", "repo_id": "transformers", "token_count": 397 }
# المعالجة المسبقة Preprocessing [[open-in-colab]] قبل تدريب نموذج على مجموعة بيانات، يجب معالجتها مسبقًا وفقًا تنسيق المتوقع لمدخلات النموذج. سواء كانت بياناتك نصية أو صورًا أو صوتًا، فيجب تحويلها وتجميعها في دفعات من الموترات. يوفر 🤗 Transformers مجموعة من فئات المعالجة المسبقة للمساعدة في إعداد بياناتك للنموذج. في هذا البرنامج التعليمي، ستتعلم أنه بالنسبة لـ: * للنص، استخدم [مُجزّئ الرموز](./main_classes/tokenizer) لتحويل النص إلى تسلسل من الرموز، وإنشاء تمثيل رقمي للرموز، وتجميعها في موترات(tensors). * للكلام والصوت، استخدم [مستخرج الميزات](./main_classes/feature_extractor) لاستخراج ميزات متسلسلة من أشكال موجات الصوت وتحويلها إلى موترات. * تستخدم مدخلات الصورة [ImageProcessor](./main_classes/image_processor) لتحويل الصور إلى موترات. * تستخدم مدخلات متعددة الوسائط [معالجًا](./main_classes/processors) لدمج مُجزّئ الرموز ومستخرج الميزات أو معالج الصور. <Tip> `AutoProcessor` **يعمل دائمًا** ويختار تلقائيًا الفئة الصحيحة للنموذج الذي تستخدمه، سواء كنت تستخدم مُجزّئ رموز أو معالج صور أو مستخرج ميزات أو معالجًا. </Tip> قبل البدء، قم بتثبيت 🤗 Datasets حتى تتمكن من تحميل بعض مجموعات البيانات لتجربتها: ```bash pip install datasets ``` ## معالجة اللغة الطبيعية (Natural Language Processing (NLP <Youtube id="Yffk5aydLzg"/> أداة المعالجة المسبقة الرئيسية للبيانات النصية هي [مُجزّئ اللغوي](main_classes/tokenizer). يقوم مُجزّئ اللغوي بتقسيم النص إلى "أجزاء لغوية" (tokens) وفقًا لمجموعة من القواعد. يتم تحويل الأجزاء اللغوية إلى أرقام ثم إلى منسوجات، والتي تصبح مدخلات للنموذج. يقوم المجزئ اللغوي بإضافة أي مدخلات إضافية يحتاجها النموذج. <Tip> إذا كنت تخطط لاستخدام نموذج مُدرب مسبقًا، فمن المهم استخدامالمجزئ اللغوي المقترن بنفس ذلك النموذج. يضمن ذلك تقسيم النص بنفس الطريقة التي تم بها تقسيم النصوص ما قبل التدريب، واستخدام نفس القاموس الذي يربط بين الأجزاء اللغوية وأرقامها ( يُشار إليها عادةً باسم المفردات *vocab*) أثناء التدريب المسبق. </Tip> ابدأ بتحميل المُجزّئ اللغوي مُدرب مسبقًا باستخدام طريقة [`AutoTokenizer.from_pretrained`]. يقوم هذا بتنزيل المفردات *vocab* الذي تم تدريب النموذج عليه: ```py >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-cased") ``` ثم مرر نصك إلى المُجزّئ اللغوي: ```py >>> encoded_input = tokenizer("Do not meddle in the affairs of wizards, for they are subtle and quick to anger.") >>> print(encoded_input) {'input_ids': [101, 2079, 2025, 19960, 10362, 1999, 1996, 3821, 1997, 16657, 1010, 2005, 2027, 2024, 11259, 1998, 4248, 2000, 4963, 1012, 102], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]} ``` يعيد المُجزّئ اللغوي قاموسًا يحتوي على ثلاثة عناصر مهمة: * [input_ids](glossary#input-ids) هي الفهارس المقابلة لكل رمز في الجملة. * [attention_mask](glossary#attention-mask) يشير إلى ما إذا كان يجب الانتباه بالرمز أم لا. * [token_type_ids](glossary#token-type-ids) يحدد التسلسل الذي ينتمي إليه الرمز عندما يكون هناك أكثر من تسلسل واحد. أعد إدخالك الأصلي عن طريق فك ترميز `input_ids`: ```py >>> tokenizer.decode(encoded_input["input_ids"]) '[CLS] Do not meddle in the affairs of wizards, for they are subtle and quick to anger. [SEP]' ``` كما ترى، أضاف المُجزّئ اللغوي رمزين خاصين - `CLS` و`SEP` (مصنف وفاصل) - إلى الجملة. لا تحتاج جميع النماذج إلى رموز خاصة، ولكن إذا فعلوا ذلك، فإن المُجزّئ اللغوي يضيفها تلقائيًا لك. إذا كان هناك عدة جمل تريد معالجتها مسبقًا، فقم بتمريرها كقائمة إلى مُجزّئ اللغوي: ```py >>> batch_sentences = [ ... "But what about second breakfast?", ... 
"Don't think he knows about second breakfast, Pip.", ... "What about elevensies?", ... ] >>> encoded_inputs = tokenizer(batch_sentences) >>> print(encoded_inputs) {'input_ids': [[101, 1252, 1184, 1164, 1248, 6462, 136, 102], [101, 1790, 112, 189, 1341, 1119, 3520, 1164, 1248, 6462, 117, 21902, 1643, 119, 102], [101, 1327, 1164, 5450, 23434, 136, 102]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1]]} ``` ### الحشو Padding لا تكون الجمل دائمًا بنفس الطول، وهذا يمكن أن يمثل مشكلة لأن الموترات،وهي مدخلات النموذج، تحتاج إلى شكل موحد. الحشو هو استراتيجية لضمان أن تكون الموترات مستطيلة عن طريق إضافة رمز حشو *padding* خاص إلى الجمل الأقصر. قم بتعيين معلمة الحشو `padding` إلى `True` لحشو التسلسلات الأقصر في الدفعة لتطابق أطول تسلسل: ```py >>> batch_sentences = [ ... "But what about second breakfast?", ... "Don't think he knows about second breakfast, Pip.", ... "What about elevensies?", ... ] >>> encoded_input = tokenizer(batch_sentences, padding=True) >>> print(encoded_input) {'input_ids': [[101, 1252, 1184, 1164, 1248, 6462, 136, 102, 0, 0, 0, 0, 0, 0, 0], [101, 1790, 112, 189, 1341, 1119, 3520, 1164, 1248, 6462, 117, 21902, 1643, 119, 102], [101, 1327, 1164, 5450, 23434, 136, 102, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 0، 0، 0، 0، 0، 0، 0، 0]]} ``` تم الآن حشو الجملتين الأولى والثالثة بـ `0` لأنهما أقصر. ### البتر Truncation وعلى صعيد أخر، قد يكون التسلسل طويلًا جدًا بالنسبة للنموذج للتعامل معه. في هذه الحالة، ستحتاج إلى بتر التسلسل إلى طول أقصر. قم بتعيين معلمة `truncation` إلى `True` لتقليم تسلسل إلى الطول الأقصى الذي يقبله النموذج: ```py >>> batch_sentences = [ ... "But what about second breakfast?", ... "Don't think he knows about second breakfast, Pip.", ... "What about elevensies?", ... ] >>> encoded_input = tokenizer(batch_sentences, padding=True, truncation=True) >>> print(encoded_input) {'input_ids': [[101, 1252, 1184, 1164, 1248, 6462, 136, 102, 0, 0, 0, 0, 0, 0, 0], [101, 1790, 112, 189, 1341, 1119, 3520, 1164, 1248, 6462, 117, 21902, 1643, 119, 102], [101, 1327, 1164, 5450, 23434, 136, 102, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0، 0، 0، 0، 0]]، 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0، 0، 0، 0], [1, 1, 1, 1, 1, 1, 1، 1، 1، 1، 1، 1، 1، 1، 1، 1], [1، 1، 1، 1، 1، 1، 1، 0، 0، 0، 0، 0، 0، 0، 0، 0]]} ``` <Tip> تحقق من دليل المفاهيم [Padding and truncation](./pad_truncation) لمعرفة المزيد حول معامﻻت الحشو و البتر المختلفة. </Tip> ### بناء الموترات Build tensors أخيرًا، تريد أن يقوم المجزئ اللغوي بإرجاع موترات (tensors) الفعلية التي ستُغذي النموذج. قم بتعيين معلمة `return_tensors` إلى إما `pt` لـ PyTorch، أو `tf` لـ TensorFlow: <frameworkcontent> <pt> ```py >>> batch_sentences = [ ... "But what about second breakfast?", ... "Don't think he knows about second breakfast, Pip.", ... "What about elevensies?", ... 
] >>> encoded_input = tokenizer(batch_sentences, padding=True, truncation=True, return_tensors="pt") >>> print(encoded_input) {'input_ids': tensor([[101, 1252, 1184, 1164, 1248, 6462, 136, 102, 0, 0, 0, 0, 0, 0, 0], [101, 1790, 112, 189, 1341, 1119, 3520, 1164, 1248, 6462, 117, 21902, 1643, 119, 102], [101, 1327, 1164, 5450, 23434, 136, 102, 0, 0, 0, 0, 0, 0, 0, 0]]), 'token_type_ids': tensor([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]), 'attention_mask': tensor([[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]])} ``` </pt> <tf> ```py >>> batch_sentences = [ ... "But what about second breakfast?", ... "Don't think he knows about second breakfast, Pip.", ... "What about elevensies?", ... ] >>> encoded_input = tokenizer(batch_sentences, padding=True, truncation=True, return_tensors="tf") >>> print(encoded_input) {'input_ids': <tf.Tensor: shape=(2, 9), dtype=int32, numpy= array([[101, 1252, 1184, 1164, 1248, 6462, 136, 102, 0, 0, 0, 0, 0, 0, 0], [101, 1790, 112, 189, 1341, 1119, 3520, 1164, 1248, 6462, 117, 21902, 1643, 119, 102], [101, 1327, 1164, 5450, 23434, 136, 102, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=int32)>, 'token_type_ids': <tf.Tensor: shape=(2, 9), dtype=int32, numpy= array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=int32)>, 'attention_mask': <tf.Tensor: shape=(2, 9), dtype=int32, numpy= array([[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=int32)>} ``` </tf> </frameworkcontent> <Tip> تدعم خطوط الأنابيب المختلفة معامل مُجزِّئ الرموز(tokenizer) بشكل مختلف في طريقة `()__call__` الخاصة بها. و خطوط الأنابيب `text-2-text-generation` تدعم فقط `truncation`. و خطوط الأنابيب `text-generation` تدعم `max_length` و`truncation` و`padding` و`add_special_tokens`. أما في خطوط الأنابيب `fill-mask`، يمكن تمرير معامل مُجزِّئ الرموز (tokenizer) في المتغير `tokenizer_kwargs` (قاموس). </Tip> ## الصوت Audio بالنسبة للمهام الصوتية، ستحتاج إلى [مستخرج الميزات](main_classes/feature_extractor) لإعداد مجموعة البيانات الخاصة بك للنماذج. تم تصميم مستخرج الميزات لاستخراج الميزات من بيانات الصوت الخام، وتحويلها إلى موتورات. قم بتحميل مجموعة بيانات [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) (راجع البرنامج التعليمي لـ 🤗 [Datasets](https://huggingface.co/docs/datasets/load_hub) لمزيد من التفاصيل حول كيفية تحميل مجموعة بيانات) لمعرفة كيفية استخدام مستخرج الميزات مع مجموعات البيانات الصوتية: ```py >>> from datasets import load_dataset, Audio >>> dataset = load_dataset("PolyAI/minds14", name="en-US", split="train") ``` الوصول إلى العنصر الأول من عمود `audio` لمعرفة المدخلات. يؤدي استدعاء عمود `audio` إلى تحميل ملف الصوت وإعادة أخذ العينات تلقائيًا: ```py >>> dataset[0]["audio"] {'array': array([ 0. , 0.00024414, -0.00024414, ..., -0.00024414, 0. , 0. ], dtype=float32), 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav', 'sampling_rate': 8000} ``` يعيد هذا ثلاثة عناصر: * `array` هو إشارة الكلام المحملة - وإعادة أخذ العينات المحتملة - كصفيف 1D. * `path` يشير إلى موقع ملف الصوت. * `sampling_rate` يشير إلى عدد نقاط البيانات في إشارة الكلام المقاسة في الثانية. 
بالنسبة لهذا البرنامج التعليمي، ستستخدم نموذج [Wav2Vec2](https://huggingface.co/facebook/wav2vec2-base). الق نظرة على بطاقة النموذج، وستتعلم أن Wav2Vec2 مُدرب مسبقًا على صوت الكلام الذي تم أخذ عينات منه بمعدل 16 كيلو هرتز. من المهم أن يتطابق معدل أخذ العينات لبيانات الصوت مع معدل أخذ العينات لمجموعة البيانات المستخدمة لتدريب النموذج مسبقًا. إذا لم يكن معدل أخذ العينات لبياناتك هو نفسه، فيجب إعادة أخذ العينات من بياناتك. 1. استخدم طريقة [`~datasets.Dataset.cast_column`] في 🤗 Datasets لإعادة أخذ العينات بمعدل أخذ العينات 16 كيلو هرتز: ```py >>> dataset = dataset.cast_column("audio", Audio(sampling_rate=16_000)) ``` 2. استدعاء عمود `audio` مرة أخرى لأخذ عينات من ملف الصوت: ```py >>> dataset[0]["audio"] {'array': array([ 2.3443763e-05, 2.1729663e-04, 2.2145823e-04, ..., 3.8356509e-05, -7.3497440e-06, -2.1754686e-05], dtype=float32), 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav', 'sampling_rate': 16000} ``` بعد ذلك، قم بتحميل مستخرج الميزات لتطبيع وحشو المدخلات. عند إضافة حشو للبيانات النصية، تتم إضافة "0" للتسلسلات الأقصر. تنطبق نفس الفكرة على بيانات الصوت. يضيف مستخرج الميزات "0" - الذي يتم تفسيره على أنه صمت - إلى "array". قم بتحميل مستخرج الميزات باستخدام [`AutoFeatureExtractor.from_pretrained`]: ```py >>> from transformers import AutoFeatureExtractor >>> feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base") ``` مرر صفيف الصوت إلى مستخرج الميزات. كما نوصي بإضافة معامل `sampling_rate` في مستخرج الميزات من أجل تصحيح الأخطاء الصامتة التي قد تحدث بشكل أفضل. ```py >>> audio_input = [dataset[0]["audio"]["array"]] >>> feature_extractor(audio_input, sampling_rate=16000) {'input_values': [array([ 3.8106556e-04, 2.7506407e-03, 2.8015103e-03, ..., 5.6335266e-04, 4.6588284e-06, -1.7142107e-04], dtype=float32)]} ``` تمامًا مثل مُجزِّئ الرموز، يمكنك تطبيق الحشو أو البتر للتعامل مع التسلسلات المتغيرة في دفعة. الق نظرة على طول التسلسل لهاتين العينتين الصوتيتين: ```py >>> dataset[0]["audio"]["array"].shape (173398,) >>> dataset[1]["audio"]["array"].shape (106496,) ``` قم بإنشاء دالة لمعالجة مجموعة البيانات بحيث يكون للنماذج الصوتية نفس الأطوال. حدد أقصى طول للعينة ، وسيقوم مستخرج الميزات إما بإضافة حشو أو بتر التسلسلات لمطابقتها: ```py >>> def preprocess_function(examples): ... audio_arrays = [x["array"] for x in examples["audio"]] ... inputs = feature_extractor( ... audio_arrays, ... sampling_rate=16000, ... padding=True, ... max_length=100000, ... truncation=True, ... ) ... return inputs ``` قم بتطبيق `preprocess_function` على أول بضع أمثلة في مجموعة البيانات: ```py >>> processed_dataset = preprocess_function(dataset[:5]) ``` أطوال العينات الآن متساوية وتطابق الطول الأقصى المحدد. يمكنك الآن تمرير مجموعة البيانات المعالجة إلى النموذج! ```py >>> processed_dataset["input_values"][0].shape (100000,) >>> processed_dataset["input_values"][1].shape (100000,) ``` ## رؤية الكمبيوتر Computer vision بالنسبة لمهام رؤية الحاسوبية، ستحتاج إلى معالج صور [image processor](main_classes/image_processor) لإعداد مجموعة البيانات الخاصة بك لتناسب النموذج. تتكون معالجة الصور المسبقة من عدة خطوات لتحويل الصور إلى الشكل الذي يتوقعه النموذج. وتشمل هذه الخطوات، على سبيل المثال لا الحصر، تغيير الحجم والتطبيع وتصحيح قناة الألوان وتحويل الصور إلى موترات(tensors). <Tip> عادة ما تتبع معالجة الصور المسبقة شكلاً من أشكال زيادة البيانات (التضخيم). 
كلا العمليتين، معالجة الصور المسبقة وزيادة الصور تغيران بيانات الصورة، ولكنها تخدم أغراضًا مختلفة: *زيادة البيانات: تغيير الصور عن طريق زيادة الصور بطريقة يمكن أن تساعد في منع الإفراط في التعميم وزيادة متانة النموذج. يمكنك أن تكون مبدعًا في كيفية زيادة بياناتك - ضبط السطوع والألوان، واالقص، والدوران، تغيير الحجم، التكبير، إلخ. ومع ذلك، كن حذرًا من عدم تغيير معنى الصور بزياداتك. *معالجة الصور المسبقة: تضمن معالجة الصور اتتطابق الصور مع تنسيق الإدخال المتوقع للنموذج. عند ضبط نموذج رؤية حاسوبية بدقة، يجب معالجة الصور بالضبط كما كانت عند تدريب النموذج في البداية. يمكنك استخدام أي مكتبة تريدها لزيادة بيانات الصور. لمعالجة الصور المسبقة، استخدم `ImageProcessor` المرتبط بالنموذج. </Tip> قم بتحميل مجموعة بيانات [food101](https://huggingface.co/datasets/food101) (راجع دليل 🤗 [Datasets tutorial](https://huggingface.co/docs/datasets/load_hub) لمزيد من التفاصيل حول كيفية تحميل مجموعة بيانات) لمعرفة كيف يمكنك استخدام معالج الصور مع مجموعات بيانات رؤية الحاسب: <Tip> استخدم معامل `split` من 🤗 Datasets لتحميل عينة صغيرة فقط من مجموعة التدريب نظرًا لحجم البيانات كبيرة جدًا! </Tip> ```py >>> from datasets import load_dataset >>> dataset = load_dataset("food101", split="train[:100]") ``` بعد ذلك، الق نظرة على الصورة مع ميزة 🤗 Datasets [`Image`](https://huggingface.co/docs/datasets/package_reference/main_classes?highlight=image#datasets.Image): ```py >>> dataset[0]["image"] ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/vision-preprocess-tutorial.png"/> </div> قم بتحميل معالج الصور باستخدام [`AutoImageProcessor.from_pretrained`]: ```py >>> from transformers import AutoImageProcessor >>> image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224") ``` أولاً، دعنا نضيف بعض الزيادات إلى الصور. يمكنك استخدام أي مكتبة تفضلها، ولكن في هذا الدليل، سنستخدم وحدة [`transforms`](https://pytorch.org/vision/stable/transforms.html) من torchvision. إذا كنت مهتمًا باستخدام مكتبة زيادة بيانات أخرى، فتعرف على كيفية القيام بذلك في [دفاتر Albumentations](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification_albumentations.ipynb) أو [دفاتر Kornia](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification_kornia.ipynb). 1. هنا نستخدم [`Compose`](https://pytorch.org/vision/master/generated/torchvision.transforms.Compose.html) لربط بعض التحولات معًا - [`RandomResizedCrop`](https://pytorch.org/vision/main/generated/torchvision.transforms.RandomResizedCrop.html) و [`ColorJitter`](https://pytorch.org/vision/main/generated/torchvision.transforms.ColorJitter.html). لاحظ بالنسبة لتغيير الحجم، يمكننا الحصول على متطلبات حجم الصورة من `image_processor`. بالنسبة لبعض النماذج، يُتوقع ارتفاع وعرض دقيقين، بينما بالنسبة للنماذج الأخرى، يتم تحديد الحافة الأقصر`shortest_edge` فقط. ```py >>> from torchvision.transforms import RandomResizedCrop, ColorJitter, Compose >>> size = ( ... image_processor.size["shortest_edge"] ... if "shortest_edge" in image_processor.size ... else (image_processor.size["height"], image_processor.size["width"]) ... ) >>> _transforms = Compose([RandomResizedCrop(size), ColorJitter(brightness=0.5, hue=0.5)]) ``` 2. يقبل النموذج [`pixel_values`](model_doc/vision-encoder-decoder#transformers.VisionEncoderDecoderModel.forward.pixel_values) كإدخال له. يمكن لـ `ImageProcessor` التعامل مع تطبيع الصور، وتوليد موترات(tensors) مناسبة. 
قم بإنشاء دالة تجمع بين تضخيم بيانات الصور ومعالجة الصور المسبقة لمجموعة من الصور وتوليد `pixel_values`: ```py >>> def transforms(examples): ... images = [_transforms(img.convert("RGB")) for img in examples["image"]] ... examples["pixel_values"] = image_processor(images, do_resize=False, return_tensors="pt")["pixel_values"] ... return examples ``` <Tip> في المثال أعلاه، قمنا بتعيين `do_resize=False` لأننا قمنا بالفعل بتغيير حجم الصور في تحويل زيادة الصور، واستفدنا من خاصية `size` من `image_processor` المناسب. إذا لم تقم بتغيير حجم الصور أثناء زيادة الصور، فاترك هذا المعلمة. بشكل افتراضي، ستتعامل `ImageProcessor` مع تغيير الحجم. إذا كنت ترغب في تطبيع الصور كجزء من تحويل زيادة الصور، فاستخدم قيم `image_processor.image_mean`، و `image_processor.image_std`. </Tip> 3. ثم استخدم 🤗 Datasets[`~datasets.Dataset.set_transform`] لتطبيق التحولات أثناء التنقل: ```py >>> dataset.set_transform(transforms) ``` 4. الآن عند الوصول إلى الصورة، ستلاحظ أن معالج الصور قد أضاف `pixel_values`. يمكنك تمرير مجموعة البيانات المعالجة إلى النموذج الآن! ```py >>> dataset[0].keys() ``` هكذا تبدو الصورة بعد تطبيق التحولات. تم اقتصاص الصورة بشكل عشوائي وتختلف خصائص الألوان بها. ```py >>> import numpy as np >>> import matplotlib.pyplot as plt >>> img = dataset[0]["pixel_values"] >>> plt.imshow(img.permute(1, 2, 0)) ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/preprocessed_image.png"/> </div> <Tip> بالنسبة للمهام مثل الكشف عن الأشياء، والتجزئة الدلالية، والتجزئة المثالية، والتجزئة الشاملة، يوفر `ImageProcessor` تقوم هذه الطرق بتحويل النواتج الأولية للنموذج إلى تنبؤات ذات معنى مثل مربعات الحدود، أو خرائط التجزئة. </Tip> ### الحشو Pad في بعض الحالات، على سبيل المثال، عند ضبط نموذج [DETR](./model_doc/detr) بدقة، يقوم النموذج بتطبيق زيادة المقياس أثناء التدريب. قد يتسبب ذلك في اختلاف أحجام الصور في دفعة واحدة. يمكنك استخدام [`DetrImageProcessor.pad`] من [`DetrImageProcessor`] وتحديد دالة `collate_fn` مخصصة لتجميع الصور معًا. ```py >>> def collate_fn(batch): ... pixel_values = [item["pixel_values"] for item in batch] ... encoding = image_processor.pad(pixel_values, return_tensors="pt") ... labels = [item["labels"] for item in batch] ... batch = {} ... batch["pixel_values"] = encoding["pixel_values"] ... batch["pixel_mask"] = encoding["pixel_mask"] ... batch["labels"] = labels ... return batch ``` ## متعدد الوسائط Mulimodal بالنسبة للمهام التي تتطلب مدخلات متعددة الوسائط، ستحتاج إلى معالج [processor](main_classes/processors) لإعداد مجموعة البيانات الخاصة بك لتناسب النموذج. يقترن المعالج بين بمعالجين آخرين مثل محول النص إلى رمز ومستخرج الميزات. 
قم بتحميل مجموعة بيانات [LJ Speech](https://huggingface.co/datasets/lj_speech) (راجع دليل 🤗 [Datasets tutorial](https://huggingface.co/docs/datasets/load_hub) لمزيد من التفاصيل حول كيفية تحميل مجموعة بيانات) لمعرفة كيف يمكنك استخدام معالج للتعرف التلقائي على الكلام (ASR): ```py >>> from datasets import load_dataset >>> lj_speech = load_dataset("lj_speech", split="train") ``` بالنسبة لـ ASR، فأنت تركز بشكل أساسي على `audio` و `text` لذا يمكنك إزالة الأعمدة الأخرى: ```py >>> lj_speech = lj_speech.map(remove_columns=["file", "id", "normalized_text"]) ``` الآن الق نظرة على أعمدة `audio` و `text`: ```py >>> lj_speech = lj_speech.map(remove_columns=["file", "id", "normalized_text"]) ``` الآن الق نظرة على أعمدة `audio` و `text`: ```py >>> lj_speech[0]["audio"] {'array': array([-7.3242188e-04, -7.6293945e-04, -6.4086914e-04, ..., 7.3242188e-04, 2.1362305e-04, 6.1035156e-05], dtype=float32), 'path': '/root/.cache/huggingface/datasets/downloads/extracted/917ece08c95cf0c4115e45294e3cd0dee724a1165b7fc11798369308a465bd26/LJSpeech-1.1/wavs/LJ001-0001.wav', 'sampling_rate': 22050} >>> lj_speech[0]["text"] 'Printing, in the only sense with which we are at present concerned, differs from most if not from all the arts and crafts represented in the Exhibition' ``` تذكر أنه يجب عليك دائمًا [إعادة أخذ العينات](preprocessing#audio) لمعدل أخذ العينات في مجموعة البيانات الصوتية الخاصة بك لمطابقة معدل أخذ العينات في مجموعة البيانات المستخدمة لتدريب النموذج مسبقًا! ```py >>> lj_speech = lj_speech.cast_column("audio", Audio(sampling_rate=16_000)) ``` قم بتحميل معالج باستخدام [`AutoProcessor.from_pretrained`]: ```py >>> from transformers import AutoProcessor >>> processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h") ``` 1. قم بإنشاء دالة لمعالجة بيانات الصوت الموجودة في `array` إلى `input_values`، ورموز `text` إلى `labels`. هذه هي المدخلات للنموذج: ```py >>> def prepare_dataset(example): ... audio = example["audio"] ... example.update(processor(audio=audio["array"], text=example["text"], sampling_rate=16000)) ... return example ``` 2. قم بتطبيق دالة `prepare_dataset` على عينة: ```py >>> prepare_dataset(lj_speech[0]) ``` لقد أضاف المعالج الآن `input_values` و `labels`، وتم أيضًا إعادة أخذ العينات لمعدل أخذ العينات بشكل صحيح إلى 16 كيلو هرتز. يمكنك تمرير مجموعة البيانات المعالجة إلى النموذج الآن!
transformers/docs/source/ar/preprocessing.md/0
{ "file_path": "transformers/docs/source/ar/preprocessing.md", "repo_id": "transformers", "token_count": 15734 }
# Tiktoken والتفاعل مع Transformers يتم دمج دعم ملفات نموذج tiktoken بسلاسة في 🤗 transformers عند تحميل النماذج `from_pretrained` مع ملف `tokenizer.model` tiktoken على Hub، والذي يتم تحويله تلقائيًا إلى [المحلل اللغوي السريع](https://huggingface.co/docs/transformers/main/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast). ### النماذج المعروفة التي تم إصدارها مع `tiktoken.model`: - gpt2 - llama3 ## مثال على الاستخدام من أجل تحميل ملفات `tiktoken` في `transformers`، تأكد من أن ملف `tokenizer.model` هو ملف tiktoken وسيتم تحميله تلقائيًا عند التحميل `from_pretrained`. إليك كيفية تحميل مجزىء لغوي ونموذج، والذي يمكن تحميله من نفس الملف بالضبط: ```py from transformers import AutoTokenizer model_id = "meta-llama/Meta-Llama-3-8B-Instruct" tokenizer = AutoTokenizer.from_pretrained(model_id, subfolder="original") ``` ## إنشاء مجزىء لغوي tiktoken لا يحتوي ملف `tokenizer.model` على أي معلومات حول الرموز أو الأنماط الإضافية. إذا كانت هذه الأمور مهمة، قم بتحويل المحلل اللغوي إلى `tokenizer.json`، وهو التنسيق المناسب لـ [`PreTrainedTokenizerFast`]. قم بتوليد ملف `tokenizer.model` باستخدام [tiktoken.get_encoding](https://github.com/openai/tiktoken/blob/63527649963def8c759b0f91f2eb69a40934e468/tiktoken/registry.py#L63) ثم قم بتحويله إلى `tokenizer.json` باستخدام [`convert_tiktoken_to_fast`]. ```py from transformers.integrations.tiktoken import convert_tiktoken_to_fast from tiktoken import get_encoding # يمكنك تحميل ترميزك المخصص أو الترميز الذي توفره OpenAI encoding = get_encoding("gpt2") convert_tiktoken_to_fast(encoding, "config/save/dir") ``` يتم حفظ ملف `tokenizer.json` الناتج في الدليل المحدد ويمكن تحميله باستخدام [`PreTrainedTokenizerFast`]. ```py tokenizer = PreTrainedTokenizerFast.from_pretrained("config/save/dir") ```
transformers/docs/source/ar/tiktoken.md/0
{ "file_path": "transformers/docs/source/ar/tiktoken.md", "repo_id": "transformers", "token_count": 1097 }
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Ein Modell teilen Die letzten beiden Tutorials haben gezeigt, wie man ein Modell mit PyTorch, Keras und 🤗 Accelerate für verteilte Setups feinabstimmen kann. Der nächste Schritt besteht darin, Ihr Modell mit der Community zu teilen! Bei Hugging Face glauben wir an den offenen Austausch von Wissen und Ressourcen, um künstliche Intelligenz für alle zu demokratisieren. Wir ermutigen Sie, Ihr Modell mit der Community zu teilen, um anderen zu helfen, Zeit und Ressourcen zu sparen. In diesem Tutorial lernen Sie zwei Methoden kennen, wie Sie ein trainiertes oder verfeinertes Modell auf dem [Model Hub](https://huggingface.co/models) teilen können: - Programmgesteuertes Übertragen Ihrer Dateien auf den Hub. - Ziehen Sie Ihre Dateien per Drag-and-Drop über die Weboberfläche in den Hub. <iframe width="560" height="315" src="https://www.youtube.com/embed/XvSGPZFEjDY" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> <Tip> Um ein Modell mit der Öffentlichkeit zu teilen, benötigen Sie ein Konto auf [huggingface.co](https://huggingface.co/join). Sie können auch einer bestehenden Organisation beitreten oder eine neue Organisation gründen. </Tip> ## Repository-Funktionen Jedes Repository im Model Hub verhält sich wie ein typisches GitHub-Repository. Unsere Repositorys bieten Versionierung, Commit-Historie und die Möglichkeit, Unterschiede zu visualisieren. Die integrierte Versionierung des Model Hub basiert auf Git und [git-lfs](https://git-lfs.github.com/). Mit anderen Worten: Sie können ein Modell als ein Repository behandeln, was eine bessere Zugriffskontrolle und Skalierbarkeit ermöglicht. Die Versionskontrolle ermöglicht *Revisionen*, eine Methode zum Anheften einer bestimmten Version eines Modells mit einem Commit-Hash, Tag oder Branch. Folglich können Sie eine bestimmte Modellversion mit dem Parameter "Revision" laden: ```py >>> model = AutoModel.from_pretrained( ... "julien-c/EsperBERTo-small", revision="4c77982" # tag name, or branch name, or commit hash ... ) ``` Dateien lassen sich auch in einem Repository leicht bearbeiten, und Sie können die Commit-Historie sowie die Unterschiede einsehen: ![vis_diff](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/vis_diff.png) ## Einrichtung Bevor Sie ein Modell für den Hub freigeben, benötigen Sie Ihre Hugging Face-Anmeldedaten. Wenn Sie Zugang zu einem Terminal haben, führen Sie den folgenden Befehl in der virtuellen Umgebung aus, in der 🤗 Transformers installiert ist. 
Dadurch werden Ihre Zugangsdaten in Ihrem Hugging Face-Cache-Ordner (standardmäßig `~/.cache/`) gespeichert: ```bash huggingface-cli login ``` Wenn Sie ein Notebook wie Jupyter oder Colaboratory verwenden, stellen Sie sicher, dass Sie die [`huggingface_hub`](https://huggingface.co/docs/hub/adding-a-library) Bibliothek installiert haben. Diese Bibliothek ermöglicht Ihnen die programmatische Interaktion mit dem Hub. ```bash pip install huggingface_hub ``` Verwenden Sie dann `notebook_login`, um sich beim Hub anzumelden, und folgen Sie dem Link [hier](https://huggingface.co/settings/token), um ein Token für die Anmeldung zu generieren: ```py >>> from huggingface_hub import notebook_login >>> notebook_login() ``` ## Ein Modell für alle Frameworks konvertieren Um sicherzustellen, dass Ihr Modell von jemandem verwendet werden kann, der mit einem anderen Framework arbeitet, empfehlen wir Ihnen, Ihr Modell sowohl mit PyTorch- als auch mit TensorFlow-Checkpoints zu konvertieren und hochzuladen. Während Benutzer immer noch in der Lage sind, Ihr Modell von einem anderen Framework zu laden, wenn Sie diesen Schritt überspringen, wird es langsamer sein, weil 🤗 Transformers den Checkpoint on-the-fly konvertieren müssen. Die Konvertierung eines Checkpoints für ein anderes Framework ist einfach. Stellen Sie sicher, dass Sie PyTorch und TensorFlow installiert haben (siehe [hier](installation) für Installationsanweisungen), und finden Sie dann das spezifische Modell für Ihre Aufgabe in dem anderen Framework. <frameworkcontent> <pt> Geben Sie `from_tf=True` an, um einen Prüfpunkt von TensorFlow nach PyTorch zu konvertieren: ```py >>> pt_model = DistilBertForSequenceClassification.from_pretrained("path/to/awesome-name-you-picked", from_tf=True) >>> pt_model.save_pretrained("path/to/awesome-name-you-picked") ``` </pt> <tf> Geben Sie `from_pt=True` an, um einen Prüfpunkt von PyTorch nach TensorFlow zu konvertieren: ```py >>> tf_model = TFDistilBertForSequenceClassification.from_pretrained("path/to/awesome-name-you-picked", from_pt=True) ``` Dann können Sie Ihr neues TensorFlow-Modell mit seinem neuen Checkpoint speichern: ```py >>> tf_model.save_pretrained("path/to/awesome-name-you-picked") ``` </tf> <jax> Wenn ein Modell in Flax verfügbar ist, können Sie auch einen Kontrollpunkt von PyTorch nach Flax konvertieren: ```py >>> flax_model = FlaxDistilBertForSequenceClassification.from_pretrained( ... "path/to/awesome-name-you-picked", from_pt=True ... ) ``` </jax> </frameworkcontent> ## Ein Modell während des Trainings hochladen <frameworkcontent> <pt> <Youtube id="Z1-XMy-GNLQ"/> Die Weitergabe eines Modells an den Hub ist so einfach wie das Hinzufügen eines zusätzlichen Parameters oder Rückrufs. Erinnern Sie sich an das [Feinabstimmungs-Tutorial](training), in der Klasse [`TrainingArguments`] geben Sie Hyperparameter und zusätzliche Trainingsoptionen an. Eine dieser Trainingsoptionen beinhaltet die Möglichkeit, ein Modell direkt an den Hub zu pushen. Setzen Sie `push_to_hub=True` in Ihrer [`TrainingArguments`]: ```py >>> training_args = TrainingArguments(output_dir="my-awesome-model", push_to_hub=True) ``` Übergeben Sie Ihre Trainingsargumente wie gewohnt an [`Trainer`]: ```py >>> trainer = Trainer( ... model=model, ... args=training_args, ... train_dataset=small_train_dataset, ... eval_dataset=small_eval_dataset, ... compute_metrics=compute_metrics, ... 
) ``` Nach der Feinabstimmung Ihres Modells rufen Sie [`~transformers.Trainer.push_to_hub`] auf [`Trainer`] auf, um das trainierte Modell an den Hub zu übertragen. Transformers fügt sogar automatisch Trainings-Hyperparameter, Trainingsergebnisse und Framework-Versionen zu Ihrer Modellkarte hinzu! ```py >>> trainer.push_to_hub() ``` </pt> <tf> Geben Sie ein Modell mit [`PushToHubCallback`] an den Hub weiter. In der [`PushToHubCallback`] Funktion, fügen Sie hinzu: - Ein Ausgabeverzeichnis für Ihr Modell. - Einen Tokenizer. - Die `hub_model_id`, die Ihr Hub-Benutzername und Modellname ist. ```py >>> from transformers import PushToHubCallback >>> push_to_hub_callback = PushToHubCallback( ... output_dir="./your_model_save_path", tokenizer=tokenizer, hub_model_id="your-username/my-awesome-model" ... ) ``` Fügen Sie den Callback zu [`fit`](https://keras.io/api/models/model_training_apis/) hinzu, und 🤗 Transformers wird das trainierte Modell an den Hub weiterleiten: ```py >>> model.fit(tf_train_dataset, validation_data=tf_validation_dataset, epochs=3, callbacks=push_to_hub_callback) ``` </tf> </frameworkcontent> ## Verwenden Sie die Funktion `push_to_hub`. Sie können `push_to_hub` auch direkt für Ihr Modell aufrufen, um es in den Hub hochzuladen. Geben Sie den Namen Ihres Modells in "push_to_hub" an: ```py >>> pt_model.push_to_hub("my-awesome-model") ``` Dadurch wird ein Repository unter Ihrem Benutzernamen mit dem Modellnamen `my-awesome-model` erstellt. Benutzer können nun Ihr Modell mit der Funktion `from_pretrained` laden: ```py >>> from transformers import AutoModel >>> model = AutoModel.from_pretrained("your_username/my-awesome-model") ``` Wenn Sie zu einer Organisation gehören und Ihr Modell stattdessen unter dem Namen der Organisation pushen wollen, fügen Sie diesen einfach zur `repo_id` hinzu: ```py >>> pt_model.push_to_hub("my-awesome-org/my-awesome-model") ``` Die Funktion "push_to_hub" kann auch verwendet werden, um andere Dateien zu einem Modell-Repository hinzuzufügen. Zum Beispiel kann man einen Tokenizer zu einem Modell-Repository hinzufügen: ```py >>> tokenizer.push_to_hub("my-awesome-model") ``` Oder vielleicht möchten Sie die TensorFlow-Version Ihres fein abgestimmten PyTorch-Modells hinzufügen: ```py >>> tf_model.push_to_hub("my-awesome-model") ``` Wenn Sie nun zu Ihrem Hugging Face-Profil navigieren, sollten Sie Ihr neu erstelltes Modell-Repository sehen. Wenn Sie auf die Registerkarte **Dateien** klicken, werden alle Dateien angezeigt, die Sie in das Repository hochgeladen haben. Weitere Einzelheiten zum Erstellen und Hochladen von Dateien in ein Repository finden Sie in der Hub-Dokumentation [hier](https://huggingface.co/docs/hub/how-to-upstream). ## Hochladen mit der Weboberfläche Benutzer, die einen no-code Ansatz bevorzugen, können ein Modell über das Webinterface des Hubs hochladen. Besuchen Sie [huggingface.co/new](https://huggingface.co/new) um ein neues Repository zu erstellen: ![new_model_repo](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/new_model_repo.png) Fügen Sie von hier aus einige Informationen über Ihr Modell hinzu: - Wählen Sie den **Besitzer** des Repositorys. Dies können Sie selbst oder eine der Organisationen sein, denen Sie angehören. - Wählen Sie einen Namen für Ihr Modell, der auch der Name des Repositorys sein wird. - Wählen Sie, ob Ihr Modell öffentlich oder privat ist. - Geben Sie die Lizenzverwendung für Ihr Modell an. 
Klicken Sie nun auf die Registerkarte **Dateien** und klicken Sie auf die Schaltfläche **Datei hinzufügen**, um eine neue Datei in Ihr Repository hochzuladen. Ziehen Sie dann eine Datei per Drag-and-Drop hoch und fügen Sie eine Übergabemeldung hinzu. ![upload_file](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/upload_file.png) ## Hinzufügen einer Modellkarte Um sicherzustellen, dass die Benutzer die Fähigkeiten, Grenzen, möglichen Verzerrungen und ethischen Aspekte Ihres Modells verstehen, fügen Sie bitte eine Modellkarte zu Ihrem Repository hinzu. Die Modellkarte wird in der Datei `README.md` definiert. Sie können eine Modellkarte hinzufügen, indem Sie: * Manuelles Erstellen und Hochladen einer "README.md"-Datei. * Klicken Sie auf die Schaltfläche **Modellkarte bearbeiten** in Ihrem Modell-Repository. Werfen Sie einen Blick auf die DistilBert [model card](https://huggingface.co/distilbert/distilbert-base-uncased) als gutes Beispiel für die Art von Informationen, die eine Modellkarte enthalten sollte. Weitere Details über andere Optionen, die Sie in der Datei "README.md" einstellen können, wie z.B. den Kohlenstoff-Fußabdruck eines Modells oder Beispiele für Widgets, finden Sie in der Dokumentation [hier](https://huggingface.co/docs/hub/models-cards).
transformers/docs/source/de/model_sharing.md/0
{ "file_path": "transformers/docs/source/de/model_sharing.md", "repo_id": "transformers", "token_count": 4286 }
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Agents and tools [[open-in-colab]] ### What is an agent? Large Language Models (LLMs) trained to perform [causal language modeling](./tasks/language_modeling) can tackle a wide range of tasks, but they often struggle with basic tasks like logic, calculation, and search. When prompted in domains in which they do not perform well, they often fail to generate the answer we expect them to. One approach to overcome this weakness is to create an *agent*. An agent is a system that uses an LLM as its engine, and it has access to functions called *tools*. These *tools* are functions for performing a task, and they contain all necessary description for the agent to properly use them. The agent can be programmed to: - devise a series of actions/tools and run them all at once, like the [`CodeAgent`] - plan and execute actions/tools one by one and wait for the outcome of each action before launching the next one, like the [`ReactJsonAgent`] ### Types of agents #### Code agent This agent has a planning step, then generates python code to execute all its actions at once. It natively handles different input and output types for its tools, thus it is the recommended choice for multimodal tasks. #### React agents This is the go-to agent to solve reasoning tasks, since the ReAct framework ([Yao et al., 2022](https://huggingface.co/papers/2210.03629)) makes it really efficient to think on the basis of its previous observations. We implement two versions of ReactJsonAgent: - [`ReactJsonAgent`] generates tool calls as a JSON in its output. - [`ReactCodeAgent`] is a new type of ReactJsonAgent that generates its tool calls as blobs of code, which works really well for LLMs that have strong coding performance. > [!TIP] > Read [Open-source LLMs as LangChain Agents](https://huggingface.co/blog/open-source-llms-as-agents) blog post to learn more about ReAct agents. <div class="flex justify-center"> <img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/Agent_ManimCE.gif" /> <img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/Agent_ManimCE.gif" /> </div> ![Framework of a React Agent](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/open-source-llms-as-agents/ReAct.png) For example, here is how a ReAct Code agent would work its way through the following question. ```py3 >>> agent.run( ... "How many more blocks (also denoted as layers) in BERT base encoder than the encoder from the architecture proposed in Attention is All You Need?", ... ) =====New task===== How many more blocks (also denoted as layers) in BERT base encoder than the encoder from the architecture proposed in Attention is All You Need? 
====Agent is executing the code below:
bert_blocks = search(query="number of blocks in BERT base encoder")
print("BERT blocks:", bert_blocks)
====
Print outputs:
BERT blocks: twelve encoder blocks

====Agent is executing the code below:
attention_layer = search(query="number of layers in Attention is All You Need")
print("Attention layers:", attention_layer)
====
Print outputs:
Attention layers: Encoder: The encoder is composed of a stack of N = 6 identical layers. Each layer has two sub-layers. The first is a multi-head self-attention mechanism, and the second is a simple, position-
2 Page 3 Figure 1: The Transformer - model architecture.

====Agent is executing the code below:
bert_blocks = 12
attention_layers = 6
diff = bert_blocks - attention_layers
print("Difference in blocks:", diff)
final_answer(diff)
====
Print outputs:
Difference in blocks: 6

Final answer: 6
```

### How can I build an agent?

To initialize an agent, you need these arguments:

- an LLM to power your agent - the agent is not exactly the LLM, it's more like the agent is a program that uses an LLM as its engine.
- a system prompt: what the LLM engine will be prompted with to generate its output
- a toolbox from which the agent picks tools to execute
- a parser to extract which tools to call from the LLM output, and with which arguments

Upon initialization of the agent system, the tool attributes are used to generate a tool description, which is then baked into the agent's `system_prompt` to let it know which tools it can use and why.

To start with, install the `agents` extras to pull in all default dependencies.

```bash
pip install transformers[agents]
```

Build your LLM engine by defining a `llm_engine` method which accepts a list of [messages](./chat_templating) and returns text. This callable also needs to accept a `stop_sequences` argument that indicates when to stop generating.

```python
from huggingface_hub import login, InferenceClient

login("<YOUR_HUGGINGFACEHUB_API_TOKEN>")

client = InferenceClient(model="meta-llama/Meta-Llama-3-70B-Instruct")

def llm_engine(messages, stop_sequences=["Task"]) -> str:
    response = client.chat_completion(messages, stop=stop_sequences, max_tokens=1000)
    answer = response.choices[0].message.content
    return answer
```

You could use any `llm_engine` method as long as:
1. it follows the [messages format](./chat_templating) (`List[Dict[str, str]]`) for its input `messages`, and it returns a `str`.
2. it stops generating outputs at the sequences passed in the argument `stop_sequences`

Additionally, `llm_engine` can also take a `grammar` argument. If you specify a `grammar` upon agent initialization, it will be passed to every call to `llm_engine` to allow [constrained generation](https://huggingface.co/docs/text-generation-inference/conceptual/guidance) and force properly-formatted agent outputs.

You will also need a `tools` argument which accepts a list of `Tools` - it can be an empty list. You can also add the default toolbox on top of your `tools` list by defining the optional argument `add_base_tools=True`.

Now you can create an agent, like [`CodeAgent`], and run it. You can also create a [`TransformersEngine`] with a pre-initialized pipeline to run inference on your local machine using `transformers`.
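As a rough sketch of that local option (the checkpoint name below is only an example to swap for any chat-capable model you can run, and `TransformersEngine` is assumed to be importable from the top-level package as in recent versions), you could wrap a standard text-generation pipeline and use it as the agent's engine:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
from transformers import CodeAgent, TransformersEngine

# Any locally runnable chat model works; this small checkpoint is only an example.
model_name = "HuggingFaceTB/SmolLM-135M-Instruct"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Wrap a text-generation pipeline so it can serve as the agent's LLM engine.
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
llm_engine = TransformersEngine(pipe)

agent = CodeAgent(tools=[], llm_engine=llm_engine, add_base_tools=True)
agent.run("What is the result of 2 to the power of 3.7384?")
```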
For convenience, since agentic behaviours generally require stronger models such as `Llama-3.1-70B-Instruct` that are harder to run locally for now, we also provide the [`HfApiEngine`] class that initializes a `huggingface_hub.InferenceClient` under the hood. ```python from transformers import CodeAgent, HfApiEngine llm_engine = HfApiEngine(model="meta-llama/Meta-Llama-3-70B-Instruct") agent = CodeAgent(tools=[], llm_engine=llm_engine, add_base_tools=True) agent.run( "Could you translate this sentence from French, say it out loud and return the audio.", sentence="Où est la boulangerie la plus proche?", ) ``` This will be handy in case of emergency baguette need! You can even leave the argument `llm_engine` undefined, and an [`HfApiEngine`] will be created by default. ```python from transformers import CodeAgent agent = CodeAgent(tools=[], add_base_tools=True) agent.run( "Could you translate this sentence from French, say it out loud and give me the audio.", sentence="Où est la boulangerie la plus proche?", ) ``` Note that we used an additional `sentence` argument: you can pass text as additional arguments to the model. You can also use this to indicate the path to local or remote files for the model to use: ```py from transformers import ReactCodeAgent agent = ReactCodeAgent(tools=[], llm_engine=llm_engine, add_base_tools=True) agent.run("Why does Mike not know many people in New York?", audio="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/recording.mp3") ``` The prompt and output parser were automatically defined, but you can easily inspect them by calling the `system_prompt_template` on your agent. ```python print(agent.system_prompt_template) ``` It's important to explain as clearly as possible the task you want to perform. Every [`~Agent.run`] operation is independent, and since an agent is powered by an LLM, minor variations in your prompt might yield completely different results. You can also run an agent consecutively for different tasks: each time the attributes `agent.task` and `agent.logs` will be re-initialized. #### Code execution A Python interpreter executes the code on a set of inputs passed along with your tools. This should be safe because the only functions that can be called are the tools you provided (especially if it's only tools by Hugging Face) and the print function, so you're already limited in what can be executed. The Python interpreter also doesn't allow imports by default outside of a safe list, so all the most obvious attacks shouldn't be an issue. You can still authorize additional imports by passing the authorized modules as a list of strings in argument `additional_authorized_imports` upon initialization of your [`ReactCodeAgent`] or [`CodeAgent`]: ```py >>> from transformers import ReactCodeAgent >>> agent = ReactCodeAgent(tools=[], additional_authorized_imports=['requests', 'bs4']) >>> agent.run("Could you get me the title of the page at url 'https://huggingface.co/blog'?") (...) 'Hugging Face – Blog' ``` The execution will stop at any code trying to perform an illegal operation or if there is a regular Python error with the code generated by the agent. > [!WARNING] > The LLM can generate arbitrary code that will then be executed: do not add any unsafe imports! ### The system prompt An agent, or rather the LLM that drives the agent, generates an output based on the system prompt. The system prompt can be customized and tailored to the intended task. 
For example, check the system prompt for the [`ReactCodeAgent`] (below version is slightly simplified). ```text You will be given a task to solve as best you can. You have access to the following tools: <<tool_descriptions>> To solve the task, you must plan forward to proceed in a series of steps, in a cycle of 'Thought:', 'Code:', and 'Observation:' sequences. At each step, in the 'Thought:' sequence, you should first explain your reasoning towards solving the task, then the tools that you want to use. Then in the 'Code:' sequence, you should write the code in simple Python. The code sequence must end with '/End code' sequence. During each intermediate step, you can use 'print()' to save whatever important information you will then need. These print outputs will then be available in the 'Observation:' field, for using this information as input for the next step. In the end you have to return a final answer using the `final_answer` tool. Here are a few examples using notional tools: --- {examples} Above example were using notional tools that might not exist for you. You only have acces to those tools: <<tool_names>> You also can perform computations in the python code you generate. Always provide a 'Thought:' and a 'Code:\n```py' sequence ending with '```<end_code>' sequence. You MUST provide at least the 'Code:' sequence to move forward. Remember to not perform too many operations in a single code block! You should split the task into intermediate code blocks. Print results at the end of each step to save the intermediate results. Then use final_answer() to return the final result. Remember to make sure that variables you use are all defined. Now Begin! ``` The system prompt includes: - An *introduction* that explains how the agent should behave and what tools are. - A description of all the tools that is defined by a `<<tool_descriptions>>` token that is dynamically replaced at runtime with the tools defined/chosen by the user. - The tool description comes from the tool attributes, `name`, `description`, `inputs` and `output_type`, and a simple `jinja2` template that you can refine. - The expected output format. You could improve the system prompt, for example, by adding an explanation of the output format. For maximum flexibility, you can overwrite the whole system prompt template by passing your custom prompt as an argument to the `system_prompt` parameter. ```python from transformers import ReactJsonAgent from transformers.agents import PythonInterpreterTool agent = ReactJsonAgent(tools=[PythonInterpreterTool()], system_prompt="{your_custom_prompt}") ``` > [!WARNING] > Please make sure to define the `<<tool_descriptions>>` string somewhere in the `template` so the agent is aware of the available tools. ### Inspecting an agent run Here are a few useful attributes to inspect what happened after a run: - `agent.logs` stores the fine-grained logs of the agent. At every step of the agent's run, everything gets stored in a dictionary that then is appended to `agent.logs`. - Running `agent.write_inner_memory_from_logs()` creates an inner memory of the agent's logs for the LLM to view, as a list of chat messages. This method goes over each step of the log and only stores what it's interested in as a message: for instance, it will save the system prompt and task in separate messages, then for each step it will store the LLM output as a message, and the tool call output as another message. 
Use this if you want a higher-level view of what has happened - but not every log will be transcribed by this method.

## Tools

A tool is an atomic function to be used by an agent.

You can for instance check the [`PythonInterpreterTool`]: it has a name, a description, input descriptions, an output type, and a `__call__` method to perform the action.

When the agent is initialized, the tool attributes are used to generate a tool description which is baked into the agent's system prompt. This lets the agent know which tools it can use and why.

### Default toolbox

Transformers comes with a default toolbox for empowering agents, which you can add to your agent upon initialization with the argument `add_base_tools=True`:

- **Document question answering**: given a document (such as a PDF) in image format, answer a question on this document ([Donut](./model_doc/donut))
- **Image question answering**: given an image, answer a question on this image ([VILT](./model_doc/vilt))
- **Speech to text**: given an audio recording of a person talking, transcribe the speech into text ([Whisper](./model_doc/whisper))
- **Text to speech**: convert text to speech ([SpeechT5](./model_doc/speecht5))
- **Translation**: translates a given sentence from source language to target language.
- **DuckDuckGo search**: performs a web search using the DuckDuckGo browser.
- **Python code interpreter**: runs the LLM-generated Python code in a secure environment. This tool is only added to [`ReactJsonAgent`] if you initialize it with `add_base_tools=True`, since a code-based agent can already natively execute Python code.

You can manually use a tool by calling the [`load_tool`] function with the task to perform.

```python
from transformers import load_tool

tool = load_tool("text-to-speech")
audio = tool("This is a text to speech tool")
```

### Create a new tool

You can create your own tool for use cases not covered by the default tools from Hugging Face.
For example, let's create a tool that returns the most downloaded model for a given task from the Hub.

You'll start with the code below.

```python
from huggingface_hub import list_models

task = "text-classification"

model = next(iter(list_models(filter=task, sort="downloads", direction=-1)))
print(model.id)
```

This code can quickly be converted into a tool, just by wrapping it in a function and adding the `tool` decorator:

```py
from huggingface_hub import list_models

from transformers import tool


@tool
def model_download_tool(task: str) -> str:
    """
    This is a tool that returns the most downloaded model of a given task on the Hugging Face Hub.
    It returns the name of the checkpoint.

    Args:
        task: The task for which to get the most downloaded model.
    """
    model = next(iter(list_models(filter=task, sort="downloads", direction=-1)))
    return model.id
```

The function needs:
- A clear name. The name usually describes what the tool does. Since the code returns the model with the most downloads for a task, let's put `model_download_tool`.
- Type hints on both inputs and output.
- A description that includes an 'Args:' part where each argument is described (without a type indication this time, it will be pulled from the type hint). All of these are automatically baked into the agent's system prompt upon initialization, so strive to make them as clear as possible!

> [!TIP]
> This definition format is the same as tool schemas used in `apply_chat_template`, the only difference is the added `tool` decorator: read more on our tool use API [here](https://huggingface.co/blog/unified-tool-use#passing-tools-to-a-chat-template).
Then you can directly initialize your agent:

```py
from transformers import CodeAgent

agent = CodeAgent(tools=[model_download_tool], llm_engine=llm_engine)
agent.run(
    "Can you give me the name of the model that has the most downloads in the 'text-to-video' task on the Hugging Face Hub?"
)
```

You get the following:

```text
======== New task ========
Can you give me the name of the model that has the most downloads in the 'text-to-video' task on the Hugging Face Hub?

==== Agent is executing the code below:
most_downloaded_model = model_download_tool(task="text-to-video")
print(f"The most downloaded model for the 'text-to-video' task is {most_downloaded_model}.")
====
```

And the output:
`"The most downloaded model for the 'text-to-video' task is ByteDance/AnimateDiff-Lightning."`

### Manage your agent's toolbox

If you have already initialized an agent, it is inconvenient to reinitialize it from scratch just to add a tool you want to use. With Transformers, you can manage an agent's toolbox by adding or replacing a tool.

Let's add the `model_download_tool` to an existing agent initialized with only the default toolbox.

```python
from transformers import CodeAgent

agent = CodeAgent(tools=[], llm_engine=llm_engine, add_base_tools=True)
agent.toolbox.add_tool(model_download_tool)
```

Now we can leverage both the new tool and the previous text-to-speech tool:

```python
agent.run(
    "Can you read out loud the name of the model that has the most downloads in the 'text-to-video' task on the Hugging Face Hub and return the audio?"
)
```

| **Audio**                                                                                                                                            |
|------------------------------------------------------------------------------------------------------------------------------------------------------|
| <audio controls><source src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/damo.wav" type="audio/wav"/></audio> |

> [!WARNING]
> Beware when adding tools to an agent that already works well, because it can bias tool selection towards your new tool, or lead the agent to select a tool other than the one already defined for the task.

Use the `agent.toolbox.update_tool()` method to replace an existing tool in the agent's toolbox.
This is useful if your new tool is a one-to-one replacement of the existing tool, because the agent already knows how to perform that specific task.
Just make sure the new tool follows the same API as the replaced tool, or adapt the system prompt template to ensure all examples using the replaced tool are updated.

### Use a collection of tools

You can leverage tool collections by using the [`ToolCollection`] object, with the slug of the collection you want to use.
Then pass them as a list to initialize your agent, and start using them!

```py
from transformers import ToolCollection, ReactCodeAgent

image_tool_collection = ToolCollection(collection_slug="huggingface-tools/diffusion-tools-6630bb19a942c2306a2cdb6f")
agent = ReactCodeAgent(tools=[*image_tool_collection.tools], add_base_tools=True)

agent.run("Please draw me a picture of rivers and lakes.")
```

To speed up the start, tools are loaded only if called by the agent.

This gets you this image:

<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/rivers_and_lakes.png">
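If you want to confirm which tools the agent ended up with before running it, you can inspect its toolbox. The snippet below is only a sketch: it assumes `agent.toolbox.tools` is a mapping from tool names to tool instances, which may differ slightly between versions.

```python
# Sketch: print the name and description of every tool currently registered in the toolbox.
# Assumes `agent` is the ReactCodeAgent created above and that `toolbox.tools` is a dict.
for name, tool in agent.toolbox.tools.items():
    print(f"{name}: {tool.description}")
```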
transformers/docs/source/en/agents.md/0
{ "file_path": "transformers/docs/source/en/agents.md", "repo_id": "transformers", "token_count": 5761 }
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Text generation strategies Text generation is essential to many NLP tasks, such as open-ended text generation, summarization, translation, and more. It also plays a role in a variety of mixed-modality applications that have text as an output like speech-to-text and vision-to-text. Some of the models that can generate text include GPT2, XLNet, OpenAI GPT, CTRL, TransformerXL, XLM, Bart, T5, GIT, Whisper. Check out a few examples that use [`~generation.GenerationMixin.generate`] method to produce text outputs for different tasks: * [Text summarization](./tasks/summarization#inference) * [Image captioning](./model_doc/git#transformers.GitForCausalLM.forward.example) * [Audio transcription](./model_doc/whisper#transformers.WhisperForConditionalGeneration.forward.example) Note that the inputs to the generate method depend on the model's modality. They are returned by the model's preprocessor class, such as AutoTokenizer or AutoProcessor. If a model's preprocessor creates more than one kind of input, pass all the inputs to generate(). You can learn more about the individual model's preprocessor in the corresponding model's documentation. The process of selecting output tokens to generate text is known as decoding, and you can customize the decoding strategy that the `generate()` method will use. Modifying a decoding strategy does not change the values of any trainable parameters. However, it can have a noticeable impact on the quality of the generated output. It can help reduce repetition in the text and make it more coherent. This guide describes: * default generation configuration * common decoding strategies and their main parameters * saving and sharing custom generation configurations with your fine-tuned model on 🤗 Hub <Tip> `generate()` is a critical component of our [chat CLI](quicktour#chat-with-text-generation-models). You can apply the learnings of this guide there as well. </Tip> ## Default text generation configuration A decoding strategy for a model is defined in its generation configuration. When using pre-trained models for inference within a [`pipeline`], the models call the `PreTrainedModel.generate()` method that applies a default generation configuration under the hood. The default configuration is also used when no custom configuration has been saved with the model. 
When you load a model explicitly, you can inspect the generation configuration that comes with it through `model.generation_config`: ```python >>> from transformers import AutoModelForCausalLM >>> model = AutoModelForCausalLM.from_pretrained("distilbert/distilgpt2") >>> model.generation_config GenerationConfig { "bos_token_id": 50256, "eos_token_id": 50256 } <BLANKLINE> ``` Printing out the `model.generation_config` reveals only the values that are different from the default generation configuration, and does not list any of the default values. The default generation configuration limits the size of the output combined with the input prompt to a maximum of 20 tokens to avoid running into resource limitations. The default decoding strategy is greedy search, which is the simplest decoding strategy that picks a token with the highest probability as the next token. For many tasks and small output sizes this works well. However, when used to generate longer outputs, greedy search can start producing highly repetitive results. ## Customize text generation You can override any `generation_config` by passing the parameters and their values directly to the [`generate`] method: ```python >>> my_model.generate(**inputs, num_beams=4, do_sample=True) # doctest: +SKIP ``` Even if the default decoding strategy mostly works for your task, you can still tweak a few things. Some of the commonly adjusted parameters include: - `max_new_tokens`: the maximum number of tokens to generate. In other words, the size of the output sequence, not including the tokens in the prompt. As an alternative to using the output's length as a stopping criteria, you can choose to stop generation whenever the full generation exceeds some amount of time. To learn more, check [`StoppingCriteria`]. - `num_beams`: by specifying a number of beams higher than 1, you are effectively switching from greedy search to beam search. This strategy evaluates several hypotheses at each time step and eventually chooses the hypothesis that has the overall highest probability for the entire sequence. This has the advantage of identifying high-probability sequences that start with a lower probability initial tokens and would've been ignored by the greedy search. Visualize how it works [here](https://huggingface.co/spaces/m-ric/beam_search_visualizer). - `do_sample`: if set to `True`, this parameter enables decoding strategies such as multinomial sampling, beam-search multinomial sampling, Top-K sampling and Top-p sampling. All these strategies select the next token from the probability distribution over the entire vocabulary with various strategy-specific adjustments. - `num_return_sequences`: the number of sequence candidates to return for each input. This option is only available for the decoding strategies that support multiple sequence candidates, e.g. variations of beam search and sampling. Decoding strategies like greedy search and contrastive search return a single output sequence. It is also possible to extend `generate()` with external libraries or handcrafted code. The `logits_processor` argument allows you to pass custom [`LogitsProcessor`] instances, allowing you to manipulate the next token probability distributions. Likewise, the `stopping_criteria` argument lets you set custom [`StoppingCriteria`] to stop text generation. The [`logits-processor-zoo`](https://github.com/NVIDIA/logits-processor-zoo) library contains examples of external `generate()`-compatible extensions. 
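To make those two hooks concrete, here is a small sketch of a custom logits processor that bans one (arbitrarily chosen) token and a custom stopping criterion that halts once every sequence has produced a newline. The class names, the banned token, and the prompt are illustrative choices for this example, not part of the library.

```python
import torch

from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    LogitsProcessor,
    LogitsProcessorList,
    StoppingCriteria,
    StoppingCriteriaList,
)


class BanTokenProcessor(LogitsProcessor):
    """Sets the logit of a single token id to -inf so it can never be generated."""

    def __init__(self, banned_token_id: int):
        self.banned_token_id = banned_token_id

    def __call__(self, input_ids, scores):
        scores = scores.clone()
        scores[:, self.banned_token_id] = float("-inf")
        return scores


class StopOnNewline(StoppingCriteria):
    """Stops generation once every sequence in the batch contains a newline token."""

    def __init__(self, newline_token_id: int):
        self.newline_token_id = newline_token_id

    def __call__(self, input_ids, scores, **kwargs):
        # Returns one boolean per sequence in the batch.
        return (input_ids == self.newline_token_id).any(dim=-1)


tokenizer = AutoTokenizer.from_pretrained("distilbert/distilgpt2")
model = AutoModelForCausalLM.from_pretrained("distilbert/distilgpt2")
inputs = tokenizer("The weather today is", return_tensors="pt")

outputs = model.generate(
    **inputs,
    max_new_tokens=30,
    logits_processor=LogitsProcessorList([BanTokenProcessor(tokenizer.eos_token_id)]),
    stopping_criteria=StoppingCriteriaList([StopOnNewline(tokenizer.encode("\n")[0])]),
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```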
## Save a custom decoding strategy with your model If you would like to share your fine-tuned model with a specific generation configuration, you can: * Create a [`GenerationConfig`] class instance * Specify the decoding strategy parameters * Save your generation configuration with [`GenerationConfig.save_pretrained`], making sure to leave its `config_file_name` argument empty * Set `push_to_hub` to `True` to upload your config to the model's repo ```python >>> from transformers import AutoModelForCausalLM, GenerationConfig >>> model = AutoModelForCausalLM.from_pretrained("my_account/my_model") # doctest: +SKIP >>> generation_config = GenerationConfig( ... max_new_tokens=50, do_sample=True, top_k=50, eos_token_id=model.config.eos_token_id ... ) >>> generation_config.save_pretrained("my_account/my_model", push_to_hub=True) # doctest: +SKIP ``` You can also store several generation configurations in a single directory, making use of the `config_file_name` argument in [`GenerationConfig.save_pretrained`]. You can later instantiate them with [`GenerationConfig.from_pretrained`]. This is useful if you want to store several generation configurations for a single model (e.g. one for creative text generation with sampling, and one for summarization with beam search). You must have the right Hub permissions to add configuration files to a model. ```python >>> from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, GenerationConfig >>> tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-small") >>> model = AutoModelForSeq2SeqLM.from_pretrained("google-t5/t5-small") >>> translation_generation_config = GenerationConfig( ... num_beams=4, ... early_stopping=True, ... decoder_start_token_id=0, ... eos_token_id=model.config.eos_token_id, ... pad_token=model.config.pad_token_id, ... ) >>> # Tip: add `push_to_hub=True` to push to the Hub >>> translation_generation_config.save_pretrained("/tmp", "translation_generation_config.json") >>> # You could then use the named generation config file to parameterize generation >>> generation_config = GenerationConfig.from_pretrained("/tmp", "translation_generation_config.json") >>> inputs = tokenizer("translate English to French: Configuration files are easy to use!", return_tensors="pt") >>> outputs = model.generate(**inputs, generation_config=generation_config) >>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True)) ['Les fichiers de configuration sont faciles à utiliser!'] ``` ## Streaming The `generate()` supports streaming, through its `streamer` input. The `streamer` input is compatible with any instance from a class that has the following methods: `put()` and `end()`. Internally, `put()` is used to push new tokens and `end()` is used to flag the end of text generation. <Tip warning={true}> The API for the streamer classes is still under development and may change in the future. </Tip> In practice, you can craft your own streaming class for all sorts of purposes! We also have basic streaming classes ready for you to use. 
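As a sketch of what such a class can look like, the streamer below simply collects the token ids it receives; the class and attribute names are made up for this example, and the only requirement (as noted above) is that the object exposes `put()` and `end()`. In most cases, though, the built-in streamer classes are all you need.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer


class CollectingStreamer:
    """Minimal custom streamer: generate() only requires put() and end()."""

    def __init__(self):
        self.chunks = []

    def put(self, value):
        # `value` is a tensor of token ids: the prompt on the first call,
        # then the newly generated token(s) on each subsequent call.
        self.chunks.append(value)

    def end(self):
        print(f"Generation finished after {len(self.chunks)} put() calls.")


tok = AutoTokenizer.from_pretrained("openai-community/gpt2")
model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")
inputs = tok(["A short list of colors:"], return_tensors="pt")

streamer = CollectingStreamer()
_ = model.generate(**inputs, streamer=streamer, max_new_tokens=10)
```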
For example, you can use the built-in [`TextStreamer`] class to stream the output of `generate()` to your screen, one word at a time:

```python
>>> from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer

>>> tok = AutoTokenizer.from_pretrained("openai-community/gpt2")
>>> model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")
>>> inputs = tok(["An increasing sequence: one,"], return_tensors="pt")
>>> streamer = TextStreamer(tok)

>>> # Despite returning the usual output, the streamer will also print the generated text to stdout.
>>> _ = model.generate(**inputs, streamer=streamer, max_new_tokens=20)
An increasing sequence: one, two, three, four, five, six, seven, eight, nine, ten, eleven,
```

## Watermarking

The `generate()` method supports watermarking the generated text by randomly marking a portion of tokens as "green".
During generation, the "green" tokens have a small 'bias' value added to their logits, giving them a higher chance of being generated.
The watermarked text can be detected by calculating the proportion of "green" tokens in the text and estimating how statistically likely it is
to obtain that proportion of "green" tokens in human-generated text. This watermarking strategy was proposed in the paper
["On the Reliability of Watermarks for Large Language Models"](https://arxiv.org/abs/2306.04634). For more information on
the inner workings of watermarking, it is recommended to refer to the paper.

Watermarking can be used with any generative model in `transformers` and does not require an extra classification model
to detect watermarked text. To trigger watermarking, pass a [`WatermarkingConfig`] with the needed arguments directly to the
`.generate()` method or add it to the [`GenerationConfig`]. Watermarked text can later be detected with a [`WatermarkDetector`].

<Tip warning={true}>

The WatermarkDetector internally relies on the proportion of "green" tokens and on whether the generated text follows the coloring pattern.
That is why it is recommended to strip off the prompt text if it is much longer than the generated text.
This can also have an effect when one sequence in the batch is much longer than the others, causing the other rows to be padded.
Additionally, the detector **must** be initialized with the same watermark configuration arguments used when generating.

</Tip>

Let's generate some text with watermarking. In the code snippet below, we set the bias to 2.5, the value that
is added to the "green" tokens' logits. After generating watermarked text, we can pass it directly to the `WatermarkDetector`
to check if the text is machine-generated (it outputs `True` for machine-generated text and `False` otherwise).
```python >>> from transformers import AutoTokenizer, AutoModelForCausalLM, WatermarkDetector, WatermarkingConfig >>> model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2") >>> tok = AutoTokenizer.from_pretrained("openai-community/gpt2") >>> tok.pad_token_id = tok.eos_token_id >>> tok.padding_side = "left" >>> inputs = tok(["This is the beginning of a long story", "Alice and Bob are"], padding=True, return_tensors="pt") >>> input_len = inputs["input_ids"].shape[-1] >>> watermarking_config = WatermarkingConfig(bias=2.5, seeding_scheme="selfhash") >>> out = model.generate(**inputs, watermarking_config=watermarking_config, do_sample=False, max_length=20) >>> detector = WatermarkDetector(model_config=model.config, device="cpu", watermarking_config=watermarking_config) >>> detection_out = detector(out, return_dict=True) >>> detection_out.prediction array([ True, True]) ``` ## Decoding strategies Certain combinations of the `generate()` parameters, and ultimately `generation_config`, can be used to enable specific decoding strategies. If you are new to this concept, we recommend reading [this blog post that illustrates how common decoding strategies work](https://huggingface.co/blog/how-to-generate). Here, we'll show some of the parameters that control the decoding strategies and illustrate how you can use them. <Tip> Selecting a given decoding strategy is not the only way you can influence the outcome of `generate()` with your model. The decoding strategies act based (mostly) on the logits, the distribution of probabilities for the next token, and thus selecting a good logits manipulation strategy can go a long way! In other words, manipulating the logits is another dimension you can act upon, in addition to selecting a decoding strategy. Popular logits manipulation strategies include `top_p`, `min_p`, and `repetition_penalty` -- you can check the full list in the [`GenerationConfig`] class. </Tip> ### Greedy Search [`generate`] uses greedy search decoding by default so you don't have to pass any parameters to enable it. This means the parameters `num_beams` is set to 1 and `do_sample=False`. ```python >>> from transformers import AutoModelForCausalLM, AutoTokenizer >>> prompt = "I look forward to" >>> checkpoint = "distilbert/distilgpt2" >>> tokenizer = AutoTokenizer.from_pretrained(checkpoint) >>> inputs = tokenizer(prompt, return_tensors="pt") >>> model = AutoModelForCausalLM.from_pretrained(checkpoint) >>> outputs = model.generate(**inputs) >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ['I look forward to seeing you all again!\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n'] ``` ### Contrastive search The contrastive search decoding strategy was proposed in the 2022 paper [A Contrastive Framework for Neural Text Generation](https://arxiv.org/abs/2202.06417). It demonstrates superior results for generating non-repetitive yet coherent long outputs. To learn how contrastive search works, check out [this blog post](https://huggingface.co/blog/introducing-csearch). 
The two main parameters that enable and control the behavior of contrastive search are `penalty_alpha` and `top_k`: ```python >>> from transformers import AutoTokenizer, AutoModelForCausalLM >>> checkpoint = "openai-community/gpt2-large" >>> tokenizer = AutoTokenizer.from_pretrained(checkpoint) >>> model = AutoModelForCausalLM.from_pretrained(checkpoint) >>> prompt = "Hugging Face Company is" >>> inputs = tokenizer(prompt, return_tensors="pt") >>> outputs = model.generate(**inputs, penalty_alpha=0.6, top_k=4, max_new_tokens=100) >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ['Hugging Face Company is a family owned and operated business. We pride ourselves on being the best in the business and our customer service is second to none.\n\nIf you have any questions about our products or services, feel free to contact us at any time. We look forward to hearing from you!'] ``` ### Multinomial sampling As opposed to greedy search that always chooses a token with the highest probability as the next token, multinomial sampling (also called ancestral sampling) randomly selects the next token based on the probability distribution over the entire vocabulary given by the model. Every token with a non-zero probability has a chance of being selected, thus reducing the risk of repetition. To enable multinomial sampling set `do_sample=True` and `num_beams=1`. ```python >>> from transformers import AutoTokenizer, AutoModelForCausalLM, set_seed >>> set_seed(0) # For reproducibility >>> checkpoint = "openai-community/gpt2-large" >>> tokenizer = AutoTokenizer.from_pretrained(checkpoint) >>> model = AutoModelForCausalLM.from_pretrained(checkpoint) >>> prompt = "Today was an amazing day because" >>> inputs = tokenizer(prompt, return_tensors="pt") >>> outputs = model.generate(**inputs, do_sample=True, num_beams=1, max_new_tokens=100) >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ["Today was an amazing day because we received these wonderful items by the way of a gift shop. The box arrived on a Thursday and I opened it on Monday afternoon to receive the gifts. Both bags featured pieces from all the previous years!\n\nThe box had lots of surprises in it, including some sweet little mini chocolate chips! I don't think I'd eat all of these. This was definitely one of the most expensive presents I have ever got, I actually got most of them for free!\n\nThe first package came"] ``` ### Beam-search decoding Unlike greedy search, beam-search decoding keeps several hypotheses at each time step and eventually chooses the hypothesis that has the overall highest probability for the entire sequence. This has the advantage of identifying high-probability sequences that start with lower probability initial tokens and would've been ignored by the greedy search. <a href="https://huggingface.co/spaces/m-ric/beam_search_visualizer" class="flex flex-col justify-center"> <img style="max-width: 90%; margin: auto;" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/beam_search.png"/> </a> You can visualize how beam-search decoding works in [this interactive demo](https://huggingface.co/spaces/m-ric/beam_search_visualizer): type your input sentence, and play with the parameters to see how the decoding beams change. To enable this decoding strategy, specify the `num_beams` (aka number of hypotheses to keep track of) that is greater than 1. 
```python >>> from transformers import AutoModelForCausalLM, AutoTokenizer >>> prompt = "It is astonishing how one can" >>> checkpoint = "openai-community/gpt2-medium" >>> tokenizer = AutoTokenizer.from_pretrained(checkpoint) >>> inputs = tokenizer(prompt, return_tensors="pt") >>> model = AutoModelForCausalLM.from_pretrained(checkpoint) >>> outputs = model.generate(**inputs, num_beams=5, max_new_tokens=50) >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ['It is astonishing how one can have such a profound impact on the lives of so many people in such a short period of time."\n\nHe added: "I am very proud of the work I have been able to do in the last few years.\n\n"I have'] ``` ### Beam-search multinomial sampling As the name implies, this decoding strategy combines beam search with multinomial sampling. You need to specify the `num_beams` greater than 1, and set `do_sample=True` to use this decoding strategy. ```python >>> from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, set_seed >>> set_seed(0) # For reproducibility >>> prompt = "translate English to German: The house is wonderful." >>> checkpoint = "google-t5/t5-small" >>> tokenizer = AutoTokenizer.from_pretrained(checkpoint) >>> inputs = tokenizer(prompt, return_tensors="pt") >>> model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint) >>> outputs = model.generate(**inputs, num_beams=5, do_sample=True) >>> tokenizer.decode(outputs[0], skip_special_tokens=True) 'Das Haus ist wunderbar.' ``` ### Diverse beam search decoding The diverse beam search decoding strategy is an extension of the beam search strategy that allows for generating a more diverse set of beam sequences to choose from. To learn how it works, refer to [Diverse Beam Search: Decoding Diverse Solutions from Neural Sequence Models](https://arxiv.org/pdf/1610.02424.pdf). This approach has three main parameters: `num_beams`, `num_beam_groups`, and `diversity_penalty`. The diversity penalty ensures the outputs are distinct across groups, and beam search is used within each group. ```python >>> from transformers import AutoTokenizer, AutoModelForSeq2SeqLM >>> checkpoint = "google/pegasus-xsum" >>> prompt = ( ... "The Permaculture Design Principles are a set of universal design principles " ... "that can be applied to any location, climate and culture, and they allow us to design " ... "the most efficient and sustainable human habitation and food production systems. " ... "Permaculture is a design system that encompasses a wide variety of disciplines, such " ... "as ecology, landscape design, environmental science and energy conservation, and the " ... "Permaculture design principles are drawn from these various disciplines. Each individual " ... "design principle itself embodies a complete conceptual framework based on sound " ... "scientific principles. When we bring all these separate principles together, we can " ... "create a design system that both looks at whole systems, the parts that these systems " ... "consist of, and how those parts interact with each other to create a complex, dynamic, " ... "living system. Each design principle serves as a tool that allows us to integrate all " ... "the separate parts of a design, referred to as elements, into a functional, synergistic, " ... "whole system, where the elements harmoniously interact and work together in the most " ... "efficient way possible." ... 
) >>> tokenizer = AutoTokenizer.from_pretrained(checkpoint) >>> inputs = tokenizer(prompt, return_tensors="pt") >>> model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint) >>> outputs = model.generate(**inputs, num_beams=5, num_beam_groups=5, max_new_tokens=30, diversity_penalty=1.0) >>> tokenizer.decode(outputs[0], skip_special_tokens=True) 'The Design Principles are a set of universal design principles that can be applied to any location, climate and culture, and they allow us to design the' ``` This guide illustrates the main parameters that enable various decoding strategies. More advanced parameters exist for the [`generate`] method, which gives you even further control over the [`generate`] method's behavior. For the complete list of the available parameters, refer to the [API documentation](./main_classes/text_generation). ### Speculative Decoding Speculative decoding (also known as assisted decoding) is a modification of the decoding strategies above, that uses an assistant model (ideally a much smaller one), to generate a few candidate tokens. The main model then validates the candidate tokens in a single forward pass, which speeds up the decoding process. If `do_sample=True`, then the token validation with resampling introduced in the [speculative decoding paper](https://arxiv.org/pdf/2211.17192.pdf) is used. Assisted decoding assumes the main and assistant models have the same tokenizer, otherwise, see Universal Assisted Decoding below. Currently, only greedy search and sampling are supported with assisted decoding, and assisted decoding doesn't support batched inputs. To learn more about assisted decoding, check [this blog post](https://huggingface.co/blog/assisted-generation). To enable assisted decoding, set the `assistant_model` argument with a model. ```python >>> from transformers import AutoModelForCausalLM, AutoTokenizer >>> prompt = "Alice and Bob" >>> checkpoint = "EleutherAI/pythia-1.4b-deduped" >>> assistant_checkpoint = "EleutherAI/pythia-160m-deduped" >>> tokenizer = AutoTokenizer.from_pretrained(checkpoint) >>> inputs = tokenizer(prompt, return_tensors="pt") >>> model = AutoModelForCausalLM.from_pretrained(checkpoint) >>> assistant_model = AutoModelForCausalLM.from_pretrained(assistant_checkpoint) >>> outputs = model.generate(**inputs, assistant_model=assistant_model) >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ['Alice and Bob are sitting in a bar. Alice is drinking a beer and Bob is drinking a glass of wine.'] ``` <Tip> If you're using a `pipeline` object, all you need to do is to pass the assistant checkpoint under `assistant_model` ```python >>> from transformers import pipeline >>> import torch >>> pipe = pipeline( ... "text-generation", ... model="meta-llama/Llama-3.1-8B", ... assistant_model="meta-llama/Llama-3.2-1B", # This extra line is all that's needed, also works with UAD ... torch_dtype=torch.bfloat16 ... ) >>> pipe_output = pipe("Once upon a time, ", max_new_tokens=50, do_sample=False) >>> pipe_output[0]["generated_text"] 'Once upon a time, 3D printing was a niche technology that was only' ``` </Tip> When using assisted decoding with sampling methods, you can use the `temperature` argument to control the randomness, just like in multinomial sampling. However, in assisted decoding, reducing the temperature may help improve the latency. 
```python >>> from transformers import AutoModelForCausalLM, AutoTokenizer, set_seed >>> set_seed(42) # For reproducibility >>> prompt = "Alice and Bob" >>> checkpoint = "EleutherAI/pythia-1.4b-deduped" >>> assistant_checkpoint = "EleutherAI/pythia-160m-deduped" >>> tokenizer = AutoTokenizer.from_pretrained(checkpoint) >>> inputs = tokenizer(prompt, return_tensors="pt") >>> model = AutoModelForCausalLM.from_pretrained(checkpoint) >>> assistant_model = AutoModelForCausalLM.from_pretrained(assistant_checkpoint) >>> outputs = model.generate(**inputs, assistant_model=assistant_model, do_sample=True, temperature=0.5) >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ['Alice and Bob are two people who are very different, but they are both very good at what they do. Alice'] ``` We recommend to install `scikit-learn` library to enhance the candidate generation strategy and achieve additional speedup. #### Universal Assisted Decoding Universal Assisted Decoding (UAD) adds support for main and assistant models with different tokenizers. To use it, simply pass the tokenizers using the `tokenizer` and `assistant_tokenizer` arguments (see below). Internally, the main model input tokens are re-encoded into assistant model tokens, then candidate tokens are generated in the assistant encoding, which are in turn re-encoded into main model candidate tokens. Validation then proceeds as explained above. The re-encoding steps involve decoding token ids into text and then encoding the text using a different tokenizer. Since re-encoding the tokens may result in tokenization discrepancies, UAD finds the longest common subsequence between the source and target encodings, to ensure the new tokens include the correct prompt suffix. ```python >>> from transformers import AutoModelForCausalLM, AutoTokenizer >>> prompt = "Alice and Bob" >>> checkpoint = "google/gemma-2-9b" >>> assistant_checkpoint = "double7/vicuna-68m" >>> assistant_tokenizer = AutoTokenizer.from_pretrained(assistant_checkpoint) >>> tokenizer = AutoTokenizer.from_pretrained(checkpoint) >>> inputs = tokenizer(prompt, return_tensors="pt") >>> model = AutoModelForCausalLM.from_pretrained(checkpoint) >>> assistant_model = AutoModelForCausalLM.from_pretrained(assistant_checkpoint) >>> outputs = model.generate(**inputs, assistant_model=assistant_model, tokenizer=tokenizer, assistant_tokenizer=assistant_tokenizer) >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ['Alice and Bob are playing a game. Alice has a set of $n$ integers $a_1, a'] ``` #### Prompt Lookup Alternatively, you can also set the `prompt_lookup_num_tokens` to trigger n-gram based assisted decoding, as opposed to model based assisted decoding. You can read more about it [here](https://twitter.com/joao_gante/status/1747322413006643259). #### Self-Speculative Decoding An LLM can be trained to also use its language modeling head with earlier hidden states as input, effectively skipping layers to yield a lower-quality output -- a technique called early exiting. We use the lower-quality early exit output as an assistant output, and apply self-speculation to fix the output using the remaining layers. The final generation of that self-speculative solution is the same (or has the same distribution) as the original model's generation. If the model you're using was trained to do early exit, you can pass `assistant_early_exit` (integer). In this case, the assistant model will be the same model but exiting early, hence the "self-speculative" name. 
Because the assistant model is a portion of the target model, caches and weights can be shared, which results in lower memory requirements. As in other assisted generation methods, the final generated result has the same quality as if no assistant had been used.

```python
>>> from transformers import AutoModelForCausalLM, AutoTokenizer

>>> prompt = "Alice and Bob"
>>> checkpoint = "facebook/layerskip-llama3.2-1B"

>>> tokenizer = AutoTokenizer.from_pretrained(checkpoint)
>>> inputs = tokenizer(prompt, return_tensors="pt")

>>> model = AutoModelForCausalLM.from_pretrained(checkpoint)
>>> outputs = model.generate(**inputs, assistant_early_exit=4, do_sample=False, max_new_tokens=20)
>>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
['Alice and Bob are playing a game. Alice has a set of $n$ integers $a_1, a']
```

### DoLa Decoding

**D**ecoding by C**o**ntrasting **La**yers (DoLa) is a contrastive decoding strategy to improve the factuality and reduce the
hallucinations of LLMs, as described in the ICLR 2024 paper [DoLa: Decoding by Contrasting Layers Improves Factuality in Large Language Models](https://arxiv.org/abs/2309.03883).

DoLa is achieved by contrasting the differences in logits obtained from the final
layers versus earlier layers, thus amplifying the factual knowledge localized to particular parts of the transformer layers.

Do the following two steps to activate DoLa decoding when calling the `model.generate` function:
1. Set the `dola_layers` argument, which can be either a string or a list of integers.
    - If set to a string, it can be one of `low`, `high`.
    - If set to a list of integers, it should be a list of layer indices between 0 and the total number of layers in the model. The 0th layer is the word embedding, the 1st layer is the first transformer layer, and so on.
2. Setting `repetition_penalty = 1.2` is suggested to reduce repetition in DoLa decoding.

See the following examples for DoLa decoding with the 32-layer LLaMA-7B model.

```python
>>> from transformers import AutoTokenizer, AutoModelForCausalLM, set_seed
>>> import torch
>>> from accelerate.test_utils.testing import get_backend

>>> device, _, _ = get_backend() # automatically detects the underlying device type (CUDA, CPU, XPU, MPS, etc.)
>>> tokenizer = AutoTokenizer.from_pretrained("huggyllama/llama-7b")
>>> model = AutoModelForCausalLM.from_pretrained("huggyllama/llama-7b", torch_dtype=torch.float16).to(device)
>>> set_seed(42)

>>> text = "On what date was the Declaration of Independence officially signed?"
>>> inputs = tokenizer(text, return_tensors="pt").to(device)

>>> # Vanilla greedy decoding
>>> vanilla_output = model.generate(**inputs, do_sample=False, max_new_tokens=50)
>>> tokenizer.batch_decode(vanilla_output[:, inputs.input_ids.shape[-1]:], skip_special_tokens=True)
['\nThe Declaration of Independence was signed on July 4, 1776.\nWhat was the date of the signing of the Declaration of Independence?\nThe Declaration of Independence was signed on July 4,']

>>> # DoLa decoding with contrasting higher part of layers (layers 16,18,...,30)
>>> dola_high_output = model.generate(**inputs, do_sample=False, max_new_tokens=50, dola_layers='high')
>>> tokenizer.batch_decode(dola_high_output[:, inputs.input_ids.shape[-1]:], skip_special_tokens=True)
['\nJuly 4, 1776, when the Continental Congress voted to separate from Great Britain. The 56 delegates to the Continental Congress signed the Declaration on August 2, 1776.']
>>> # DoLa decoding with contrasting specific layers (layers 28 and 30)
>>> dola_custom_output = model.generate(**inputs, do_sample=False, max_new_tokens=50, dola_layers=[28,30], repetition_penalty=1.2)
>>> tokenizer.batch_decode(dola_custom_output[:, inputs.input_ids.shape[-1]:], skip_special_tokens=True)
['\nIn 1891, when he was 54 years old, John Jacob Astor founded his empire. He opened a one-man business and spent the next 27 years working 10-hour days. When']
```

#### Understanding the `dola_layers` argument

`dola_layers` stands for the candidate layers in premature layer selection, as described in the DoLa paper. The selected premature layer is contrasted with the final layer.

Setting `dola_layers` to `'low'` or `'high'` will select the lower or higher part of the layers to contrast, respectively.
- For `N`-layer models with `N <= 40` layers, the layers of `range(0, N // 2, 2)` and `range(N // 2, N, 2)` are used for the `'low'` and `'high'` layers, respectively (see the small worked example at the end of this section).
- For models with `N > 40` layers, the layers of `range(0, 20, 2)` and `range(N - 20, N, 2)` are used for the `'low'` and `'high'` layers, respectively.
- If the model has tied word embeddings, the word embedding (0th) layer is skipped and the candidates start from the 2nd layer, as an early exit from the word embeddings would be an identity function.
- Set `dola_layers` to a list of integers to contrast manually specified layers. For example, setting `dola_layers=[28,30]` will contrast the final layer (the 32nd layer) with the 28th and 30th layers.

The paper suggests contrasting `'high'` layers to improve short-answer tasks like TruthfulQA, and contrasting `'low'` layers to improve all other long-answer reasoning tasks, such as GSM8K, StrategyQA, FACTOR, and VicunaQA. Applying DoLa to smaller models like GPT-2 is not recommended, as shown in Appendix N of the paper.
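As a quick sanity check of these rules, the short snippet below computes the candidate layers for a 32-layer model such as LLaMA-7B; it simply reproduces the ranges described above.

```python
N = 32  # number of transformer layers in LLaMA-7B (N <= 40, so the first rule applies)

low_layers = list(range(0, N // 2, 2))   # candidates for dola_layers='low'  -> [0, 2, ..., 14]
high_layers = list(range(N // 2, N, 2))  # candidates for dola_layers='high' -> [16, 18, ..., 30]

print(low_layers)
print(high_layers)
```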
transformers/docs/source/en/generation_strategies.md/0
{ "file_path": "transformers/docs/source/en/generation_strategies.md", "repo_id": "transformers", "token_count": 9472 }
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Best Practices for Generation with Cache Efficient caching is crucial for optimizing the performance of models in various generative tasks, including text generation, translation, summarization and other transformer-based applications. Effective caching helps reduce computation time and improve response rates, especially in real-time or resource-intensive applications. Transformers support various caching methods, leveraging "Cache" classes to abstract and manage the caching logic. This document outlines best practices for using these classes to maximize performance and efficiency. Check out all the available `Cache` classes in the [API documentation](./internal/generation_utils). ## What is Cache and why we should care? Imagine you’re having a conversation with someone, and instead of remembering what was said previously, you have to start from scratch every time you respond. This would be slow and inefficient, right? In the world of Transformer models, a similar concept applies, and that's where Caching keys and values come into play. From now on, I'll refer to the concept as KV Cache. KV cache is needed to optimize the generation in autoregressive models, where the model predicts text token by token. This process can be slow since the model can generate only one token at a time, and each new prediction is dependent on the previous context. That means, to predict token number 1000 in the generation, you need information from the previous 999 tokens, which comes in the form of some matrix multiplications across the representations of those tokens. But to predict token number 1001, you also need the same information from the first 999 tokens, plus additional information from token number 1000. That is where key-value cache is used to optimize the sequential generation process by storing previous calculations to reuse in subsequent tokens, so they don't need to be computed again. More concretely, key-value cache acts as a memory bank for these generative models, where the model stores key-value pairs derived from self-attention layers for previously processed tokens. By storing this information, the model can avoid redundant computations and instead retrieve keys and values of previous tokens from the cache. Note that caching can be used only in inference and should be disabled when training, otherwise it might cause unexpected errors. <details> <summary><em>For the Curious Minds Who Like to Dive Deep</em></summary> ### Under the Hood: How Cache Object Works in Attention Mechanism When utilizing a cache object in the input, the Attention module performs several critical steps to integrate past and present information seamlessly. The Attention module concatenates the current key-values with the past key-values stored in the cache. 
This results in attention weights of shape `(new_tokens_length, past_kv_length + new_tokens_length)`. Essentially, the past and current key-values are combined to compute attention scores, ensuring that the model considers both previous context and new input. The concatenated key-values are used to compute the attention scores resulting in attention weights of shape `(new_tokens_length, past_kv_length + new_tokens_length)`. Therefore, when iteratively calling `forward()` instead of the `generate()` method, it’s crucial to ensure that the attention mask shape matches the combined length of past and current key-values. The attention mask should have the shape `(batch_size, past_kv_length + new_tokens_length)`. This is usually handled internally when you call `generate()` method. If you want to implement your own generation loop with Cache classes, take this into consideration and prepare the attention mask to hold values to current and past tokens. <Tip warning={true}> One important concept you need to know when writing your own generation loop, is `cache_position`. In case you want to reuse an already filled Cache object by calling `forward()`, you have to pass in a valid `cache_position` which will indicate the positions of inputs in the sequence. Note that `cache_position` is not affected by padding, and always adds one more position for each token. For example, if key/value cache contains 10 tokens (no matter how many of it is a pad token), the cache position for the next token should be `torch.tensor([10])`. </Tip> See an example below for how to implement your own generation loop. ```python >>> import torch >>> from transformers import AutoTokenizer, AutoModelForCausalLM, DynamicCache >>> model_id = "TinyLlama/TinyLlama-1.1B-Chat-v1.0" >>> model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="auto") >>> tokenizer = AutoTokenizer.from_pretrained(model_id) >>> past_key_values = DynamicCache() >>> messages = [{"role": "user", "content": "Hello, what's your name."}] >>> inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt", return_dict=True).to(model.device) >>> generated_ids = inputs.input_ids >>> cache_position = torch.arange(inputs.input_ids.shape[1], dtype=torch.int64, device=model.device) >>> max_new_tokens = 10 >>> for _ in range(max_new_tokens): ... outputs = model(**inputs, cache_position=cache_position, past_key_values=past_key_values, use_cache=True) ... # Greedily sample one next token ... next_token_ids = outputs.logits[:, -1:].argmax(-1) ... generated_ids = torch.cat([generated_ids, next_token_ids], dim=-1) ... ... # Prepare inputs for the next generation step by leaaving unprocessed tokens, in our case we have only one new token ... # and expanding attn mask for the new token, as explained above ... attention_mask = inputs["attention_mask"] ... attention_mask = torch.cat([attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1) ... inputs = {"input_ids": next_token_ids, "attention_mask": attention_mask} ... cache_position = cache_position[-1:] + 1 # add one more position for the next token >>> print(tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]) ``` ```txt <|user|> Hello, what's your name. <|assistant|> My name is Sarah. <| ``` </details> ## Generate with Cache In 🤗 Transformers, we support various Cache types to optimize the performance across different models and tasks. 
By default, all models generate with caching, with the [`~DynamicCache`] class being the default cache for most models. It allows us to dynamically grow cache size, by saving more and more keys and values as we generate. If for some reason you don't want to use caches, you can pass `use_cache=False` into the `generate()` method. Refer to the table below to see the difference between cache types and choose the one that suits best for your use-case. Models for which initialization is recommended should be initialized before calling the model and passed to model as a kwarg. In all other cases you can simply define desired `cache_implementation` and we take care of the rest for you. | Cache Type | Memory Efficient | Supports torch.compile() | Initialization Recommended | Latency | Long Context Generation | |------------------------|------------------|--------------------------|----------------------------|---------|-------------------------| | Dynamic Cache | No | No | No | Mid | No | | Static Cache | No | Yes | Yes | High | No | | Offloaded Cache | Yes | No | No | Low | Yes | | Offloaded Static Cache | No | Yes | Yes | High | Yes | | Quantized Cache | Yes | No | No | Low | Yes | | Sliding Window Cache | No | Yes | Yes | High | No | | Sink Cache | Yes | No | Yes | Mid | Yes | These cache classes can be set with a `cache_implementation` argument when generating. To learn about the available options for the cache_implementation flag, please refer to the [API Documentation](./main_classes/text_generation#transformers.GenerationConfig). Now, let's explore each cache type in detail and see how to use them. Note that the below examples are for decoder-only Tranformer-based models. We also support ["Model-Specific Cache"] classes for models such as Mamba or Jamba, keep reading for more details. ### Quantized Cache The key and value cache can occupy a large portion of memory, becoming a [bottleneck for long-context generation](https://huggingface.co/blog/llama31#inference-memory-requirements), especially for Large Language Models. Quantizing the cache when using `generate()` can significantly reduce memory requirements at the cost of speed. KV Cache quantization in `transformers` is largely inspired by the paper ["KIVI: A Tuning-Free Asymmetric 2bit Quantization for KV Cache"](https://arxiv.org/abs/2402.02750) and currently supports [`~QuantoQuantizedCache`] and [`~HQQQuantizedCache`] classes. For more information on the inner workings see the paper. To enable quantization of the key-value cache, one needs to indicate `cache_implementation="quantized"` in the `generation_config`. Quantization related arguments should be passed to the `generation_config` either as a `dict` or an instance of a [`~QuantizedCacheConfig`] class. One has to indicate which quantization backend to use in the [`~QuantizedCacheConfig`], the default is `quanto`. It is recommended to set `axis-key/axis-value` parameters in the cache config to `0` if you're using the `quanto` backend and to `1` if you're using the `HQQ` backend. For other config values, please use the defaults unless you're running out of memory. In that case, you may consider decreasing the residual length. <Tip warning={true}> Cache quantization can be detrimental in terms of latency if the context length is short and there is enough GPU VRAM available to run without cache quantization. It is recommended to seek balance between memory efficiency and latency. 
</Tip> ```python >>> import torch >>> from transformers import AutoTokenizer, AutoModelForCausalLM >>> tokenizer = AutoTokenizer.from_pretrained("TinyLlama/TinyLlama-1.1B-Chat-v1.0") >>> model = AutoModelForCausalLM.from_pretrained("TinyLlama/TinyLlama-1.1B-Chat-v1.0", torch_dtype=torch.float16, device_map="auto") >>> inputs = tokenizer("I like rock music because", return_tensors="pt").to(model.device) >>> out = model.generate(**inputs, do_sample=False, max_new_tokens=20, cache_implementation="quantized", cache_config={"nbits": 4, "backend": "quanto"}) >>> print(tokenizer.batch_decode(out, skip_special_tokens=True)[0]) I like rock music because it's a great way to express myself. I like the way it makes me feel, the ``` ### Offloaded Cache Similarly to KV cache quantization, [`~OffloadedCache`] strategy aims to reduce GPU VRAM usage. It does so by moving the KV cache for most layers to the CPU. As the model's `forward()` method iterates over the layers, this strategy maintains the current layer cache on the GPU. At the same time it asynchronously prefetches the next layer cache as well as sending the previous layer cache back to the CPU. Unlike KV cache quantization, this strategy always produces the same result as the default KV cache implementation. Thus, it can serve as a drop-in replacement or a fallback for it. Depending on your model and the characteristics of your generation task (size of context, number of generated tokens, number of beams, etc.) you may notice a small degradation in generation throughput compared to the default KV cache implementation. To enable KV cache offloading, pass `cache_implementation="offloaded"` in the `generation_config` or directly to the `generate()` call. Use `cache_implementation="offloaded_static"` for an offloaded static cache (see also [Offloaded Static Cache](#offloaded-static-cache) below). ```python >>> import torch >>> from transformers import AutoTokenizer, AutoModelForCausalLM >>> ckpt = "microsoft/Phi-3-mini-4k-instruct" >>> tokenizer = AutoTokenizer.from_pretrained(ckpt) >>> model = AutoModelForCausalLM.from_pretrained(ckpt, torch_dtype=torch.float16, device_map="auto") >>> inputs = tokenizer("Fun fact: The shortest", return_tensors="pt").to(model.device) >>> out = model.generate(**inputs, do_sample=False, max_new_tokens=23, cache_implementation="offloaded") >>> print(tokenizer.batch_decode(out, skip_special_tokens=True)[0]) Fun fact: The shortest war in history was between Britain and Zanzibar on August 27, 1896. >>> out = model.generate(**inputs, do_sample=False, max_new_tokens=23) >>> print(tokenizer.batch_decode(out, skip_special_tokens=True)[0]) Fun fact: The shortest war in history was between Britain and Zanzibar on August 27, 1896. ``` <Tip warning={true}> Cache offloading requires a CUDA GPU and can be slower than dynamic KV cache. Use it if you are getting CUDA out of memory errors. </Tip> The example below shows how KV cache offloading can be used as a fallback strategy. ```python >>> import torch >>> from transformers import AutoTokenizer, AutoModelForCausalLM >>> def resilient_generate(model, *args, **kwargs): ... oom = False ... try: ... return model.generate(*args, **kwargs) ... except torch.cuda.OutOfMemoryError as e: ... print(e) ... print("retrying with cache_implementation='offloaded'") ... oom = True ... if oom: ... torch.cuda.empty_cache() ... kwargs["cache_implementation"] = "offloaded" ... return model.generate(*args, **kwargs) ... ... 
>>> ckpt = "microsoft/Phi-3-mini-4k-instruct" >>> tokenizer = AutoTokenizer.from_pretrained(ckpt) >>> model = AutoModelForCausalLM.from_pretrained(ckpt, torch_dtype=torch.float16).to("cuda:0") >>> prompt = ["okay "*1000 + "Fun fact: The most"] >>> inputs = tokenizer(prompt, return_tensors="pt").to(model.device) >>> beams = { "num_beams": 40, "num_beam_groups": 40, "num_return_sequences": 40, "diversity_penalty": 1.0, "max_new_tokens": 23, "early_stopping": True, } >>> out = resilient_generate(model, **inputs, **beams) >>> responses = tokenizer.batch_decode(out[:,-28:], skip_special_tokens=True) ``` On a GPU with 50 GB of RAM, running this code will print ``` CUDA out of memory. Tried to allocate 4.83 GiB. GPU retrying with cache_implementation='offloaded' ``` before successfully generating 40 beams. ### Static Cache Since the "DynamicCache" dynamically grows with each generation step, it prevents you from taking advantage of JIT optimizations. The [`~StaticCache`] pre-allocates a specific maximum size for the keys and values, allowing you to generate up to the maximum length without having to modify cache size. Check the below usage example. For more examples with Static Cache and JIT compilation, take a look at [StaticCache & torchcompile](./llm_optims#static-kv-cache-and-torchcompile) ```python >>> import torch >>> from transformers import AutoTokenizer, AutoModelForCausalLM >>> tokenizer = AutoTokenizer.from_pretrained("TinyLlama/TinyLlama-1.1B-Chat-v1.0") >>> model = AutoModelForCausalLM.from_pretrained("TinyLlama/TinyLlama-1.1B-Chat-v1.0", torch_dtype=torch.float16, device_map="auto") >>> inputs = tokenizer("Hello, my name is", return_tensors="pt").to(model.device) >>> # simply pass the cache implementation="static" >>> out = model.generate(**inputs, do_sample=False, max_new_tokens=20, cache_implementation="static") >>> tokenizer.batch_decode(out, skip_special_tokens=True)[0] "Hello, my name is [Your Name] and I am a [Your Position] at [Your Company]. I am writing" ``` ## Offloaded Static Cache Like [`~OffloadedCache`] exists for offloading a "DynamicCache", there is also an offloaded static cache. It fully supports JIT optimizations. Just pass `cache_implementation="offloaded_static"` in the `generation_config` or directly to the `generate()` call. This will use the [`~OffloadedStaticCache`] implementation instead. ```python >>> import torch >>> from transformers import AutoTokenizer, AutoModelForCausalLM >>> tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf") >>> model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf", torch_dtype=torch.float16, device_map="auto") >>> inputs = tokenizer("Hello, my name is", return_tensors="pt").to(model.device) >>> # simply pass the cache implementation="offloaded_static" >>> out = model.generate(**inputs, do_sample=False, max_new_tokens=20, cache_implementation="offloaded_static") >>> tokenizer.batch_decode(out, skip_special_tokens=True)[0] "Hello, my name is [Your Name], and I am a [Your Profession] with [Number of Years] of" ``` Cache offloading requires a CUDA GPU. ### Sliding Window Cache As the name suggests, this cache type implements a sliding window over previous keys and values, retaining only the last `sliding_window` tokens. It should be used with models like Mistral that support sliding window attention. Additionally, similar to Static Cache, this one is JIT-friendly and can be used with the same compile tecniques as Static Cache. 
Note that you can use this cache only for models that support sliding window attention, e.g. Mistral models.

```python
>>> import torch
>>> from transformers import AutoTokenizer, AutoModelForCausalLM

>>> tokenizer = AutoTokenizer.from_pretrained("teknium/OpenHermes-2.5-Mistral-7B")
>>> model = AutoModelForCausalLM.from_pretrained("teknium/OpenHermes-2.5-Mistral-7B", torch_dtype=torch.float16, device_map="auto")
>>> inputs = tokenizer("Yesterday I was on a rock concert and.", return_tensors="pt").to(model.device)

>>> # can be used by passing in the cache implementation
>>> out = model.generate(**inputs, do_sample=False, max_new_tokens=30, cache_implementation="sliding_window")
>>> tokenizer.batch_decode(out, skip_special_tokens=True)[0]
"Yesterday I was on a rock concert and. I was so excited to see my favorite band perform live. I was so happy that I could hardly contain myself. I was jumping up and down and"
```

### Sink Cache

Sink Cache was introduced in ["Efficient Streaming Language Models with Attention Sinks"](https://arxiv.org/abs/2309.17453). It allows you to generate long sequences of text ("infinite length" according to the paper) without any fine-tuning. This is achieved by smart handling of the previous keys and values: specifically, it retains a few initial tokens from the sequence, called "sink tokens". The approach is based on the observation that these initial tokens attract a significant portion of the attention scores during generation. Tokens that come after the "sink tokens" are discarded on a sliding-window basis, keeping only the latest `window_size` tokens. By keeping these initial tokens as "attention sinks," the model maintains stable performance even when dealing with very long texts, although most of the earlier context is discarded.

Unlike other cache classes, this one can't be used by simply indicating a `cache_implementation`. You have to initialize the cache before calling `generate()`, as follows.

```python
>>> import torch
>>> from transformers import AutoTokenizer, AutoModelForCausalLM, SinkCache

>>> tokenizer = AutoTokenizer.from_pretrained("TinyLlama/TinyLlama-1.1B-Chat-v1.0")
>>> model = AutoModelForCausalLM.from_pretrained("TinyLlama/TinyLlama-1.1B-Chat-v1.0", torch_dtype=torch.float16, device_map="auto")
>>> inputs = tokenizer("This is a long story about unicorns, fairies and magic.", return_tensors="pt").to(model.device)

>>> # get our cache, specify number of sink tokens and window size
>>> # Note that window size already includes sink tokens, so has to be larger
>>> past_key_values = SinkCache(window_length=256, num_sink_tokens=4)
>>> out = model.generate(**inputs, do_sample=False, max_new_tokens=30, past_key_values=past_key_values)
>>> tokenizer.batch_decode(out, skip_special_tokens=True)[0]
"This is a long story about unicorns, fairies and magic. It is a story about a young girl named Lily who discovers that she has the power to control the elements. She learns that she can"
```

### Encoder-Decoder Cache

The [`~EncoderDecoderCache`] is a wrapper designed to handle the caching needs of encoder-decoder models. This cache type is specifically built to manage both the self-attention and the cross-attention caches, ensuring storage and retrieval of the past key/values required by these models. A nice feature of the Encoder-Decoder Cache is that you can set different cache types for the encoder and for the decoder, depending on your use case. Currently, this cache is only supported in [Whisper](./model_doc/whisper) models, but we will be adding more models soon.
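For illustration, here is a minimal sketch with Whisper (the checkpoint and the dummy audio dataset below are only illustrative choices); the [`~EncoderDecoderCache`] is created and managed internally by `generate()`:

```python
>>> import torch
>>> from datasets import load_dataset
>>> from transformers import AutoProcessor, AutoModelForSpeechSeq2Seq

>>> processor = AutoProcessor.from_pretrained("openai/whisper-tiny")
>>> model = AutoModelForSpeechSeq2Seq.from_pretrained("openai/whisper-tiny", torch_dtype=torch.float16, device_map="auto")

>>> # load a short audio sample (any 16 kHz waveform works here)
>>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
>>> inputs = processor(ds[0]["audio"]["array"], sampling_rate=16_000, return_tensors="pt").to(model.device, torch.float16)

>>> # generate() builds an EncoderDecoderCache (self-attention + cross-attention caches) under the hood
>>> out = model.generate(**inputs, max_new_tokens=30)
>>> print(processor.batch_decode(out, skip_special_tokens=True)[0])
```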
In terms of usage, there is nothing special to be done: calling `generate()` or `forward()` handles everything for you.

### Model-specific Cache Classes

Some models require storing previous keys, values, or states in a specific way, and the above cache classes cannot be used. For such cases, we have several specialized cache classes that are designed for specific models. These models only accept their own dedicated cache classes and do not support using any other cache types. Some examples include [`~HybridCache`] for [Gemma2](./model_doc/gemma2) series models or [`~MambaCache`] for [Mamba](./model_doc/mamba) architecture models.

## Iterative Generation with Cache

We have seen how to use each of the cache types when generating. But what if you want to use the cache in an iterative generation setting, for example in applications such as chatbots, where interactions involve multiple turns and continuous back-and-forth exchanges? Iterative generation with a cache allows these systems to handle ongoing conversations effectively without reprocessing the entire context at each step. There are a few things you should know before you start implementing.

The general pattern for iterative generation is shown below. First, initialize an empty cache of the type you want; then you can start feeding in new prompts iteratively. Keeping track of the dialogue history and formatting can be done with chat templates; read more on that in [chat templating](./chat_templating).

If you are using Sink Cache, you have to crop your inputs to the maximum cache length, because Sink Cache can generate text longer than its maximum window size but expects the first input not to exceed the maximum cache length.


```python
>>> import torch
>>> from transformers import AutoTokenizer, AutoModelForCausalLM
>>> from transformers.cache_utils import (
...     DynamicCache,
...     SinkCache,
...     StaticCache,
...     SlidingWindowCache,
...     QuantoQuantizedCache,
...     QuantizedCacheConfig,
... )

>>> model_id = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
>>> model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map='auto')
>>> tokenizer = AutoTokenizer.from_pretrained(model_id)

>>> user_prompts = ["Hello, what's your name?", "Btw, yesterday I was on a rock concert."]

>>> past_key_values = DynamicCache()
>>> max_cache_length = past_key_values.get_max_cache_shape()

>>> messages = []
>>> for prompt in user_prompts:
...     messages.append({"role": "user", "content": prompt})
...     inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt", return_dict=True).to(model.device)
...     if isinstance(past_key_values, SinkCache):
...         inputs = {k: v[:, -max_cache_length:] for k, v in inputs.items()}
...
...     input_length = inputs["input_ids"].shape[1]
...
...     outputs = model.generate(**inputs, do_sample=False, max_new_tokens=256, past_key_values=past_key_values)
...     completion = tokenizer.decode(outputs[0, input_length: ], skip_special_tokens=True)
...     messages.append({"role": "assistant", "content": completion})

print(messages)
[{'role': 'user', 'content': "Hello, what's your name?"}, {'role': 'assistant', 'content': "Hello, I'm AI."}, {'role': 'user', 'content': 'Btw, yesterday I was on a rock concert.'}, {'role': 'assistant', 'content': "I'm sorry to hear that you were on a rock concert yesterday. It sounds like a fun experience, but I'm not capable of experiencing music or concerts. However, I can provide you with some information about rock music and its history.
Rock music emerged in the 1950s and 1960s in the United States and Britain, and it quickly gained popularity around the world. Some of the most famous rock bands of all time include The Beatles, The Rolling Stones, Led Zeppelin, and Pink Floyd. Rock music has a distinct sound and style, with elements of blues, country, and folk music. It often features guitar solos, heavy bass lines, and drums. Rock music has had a significant impact on popular culture, influencing genres such as punk rock, heavy metal, and alternative rock."}]
```

## Re-use Cache to continue generation

Sometimes you may want to first fill a cache object with the key/values of a certain prefix prompt and then re-use it several times to generate different sequences from it. In that case, you can construct a `Cache` object that holds the instruction prompt and re-use it several times with different text sequences.

```python
>>> import copy
>>> import torch
>>> from transformers import AutoModelForCausalLM, AutoTokenizer, DynamicCache, StaticCache
>>> from accelerate.test_utils.testing import get_backend

>>> DEVICE, _, _ = get_backend() # automatically detects the underlying device type (CUDA, CPU, XPU, MPS, etc.)
>>> model_id = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
>>> model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map=DEVICE)
>>> tokenizer = AutoTokenizer.from_pretrained(model_id)

>>> # Init StaticCache with big enough max-length (1024 tokens for the below example)
>>> # You can also init a DynamicCache, if that suits you better
>>> prompt_cache = StaticCache(config=model.config, max_batch_size=1, max_cache_len=1024, device=DEVICE, dtype=torch.bfloat16)

>>> INITIAL_PROMPT = "You are a helpful assistant. "
>>> inputs_initial_prompt = tokenizer(INITIAL_PROMPT, return_tensors="pt").to(DEVICE)
>>> # This is the common prompt cached; we need to run a forward pass without grad to be able to copy it
>>> with torch.no_grad():
...     prompt_cache = model(**inputs_initial_prompt, past_key_values=prompt_cache).past_key_values

>>> prompts = ["Help me to write a blogpost about travelling.", "What is the capital of France?"]
>>> responses = []
>>> for prompt in prompts:
...     new_inputs = tokenizer(INITIAL_PROMPT + prompt, return_tensors="pt").to(DEVICE)
...     past_key_values = copy.deepcopy(prompt_cache)
...     outputs = model.generate(**new_inputs, past_key_values=past_key_values, max_new_tokens=20)
...     response = tokenizer.batch_decode(outputs)[0]
...     responses.append(response)

>>> print(responses)
['<s> You are a helpful assistant. Help me to write a blogpost about travelling. I am excited to share my experiences with you. I have been traveling for the past', '<s> You are a helpful assistant. What is the capital of France? \n\nAnswer: Paris is the capital of France.</s>']
```

## Legacy cache format

Prior to the introduction of the `Cache` object, the cache of LLMs used to be a tuple of tuples of tensors. The legacy format has a dynamic size, growing as we generate text -- very similar to `DynamicCache`. If your project depends on this legacy format, you can seamlessly convert it to a `DynamicCache` and back.
```python >>> import torch >>> from transformers import AutoTokenizer, AutoModelForCausalLM, DynamicCache >>> tokenizer = AutoTokenizer.from_pretrained("TinyLlama/TinyLlama-1.1B-Chat-v1.0") >>> model = AutoModelForCausalLM.from_pretrained("TinyLlama/TinyLlama-1.1B-Chat-v1.0", torch_dtype=torch.float16, device_map="auto") >>> inputs = tokenizer("Hello, my name is", return_tensors="pt").to(model.device) >>> # `return_dict_in_generate=True` is required to return the cache. `return_legacy_cache` forces the returned cache >>> # to be of the legacy type >>> generation_outputs = model.generate(**inputs, return_dict_in_generate=True, return_legacy_cache=True, max_new_tokens=5) >>> # We can convert a legacy cache to a DynamicCache -- and the other way around. This is helpful if you have custom >>> # logic to manipulate a cache in a specific format. >>> cache = DynamicCache.from_legacy_cache(generation_outputs.past_key_values) >>> legacy_format_cache = cache.to_legacy_cache() ```
transformers/docs/source/en/kv_cache.md/0
{ "file_path": "transformers/docs/source/en/kv_cache.md", "repo_id": "transformers", "token_count": 8759 }
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Exporting 🤗 Transformers models to ONNX 🤗 Transformers provides a `transformers.onnx` package that enables you to convert model checkpoints to an ONNX graph by leveraging configuration objects. See the [guide](../serialization) on exporting 🤗 Transformers models for more details. ## ONNX Configurations We provide three abstract classes that you should inherit from, depending on the type of model architecture you wish to export: * Encoder-based models inherit from [`~onnx.config.OnnxConfig`] * Decoder-based models inherit from [`~onnx.config.OnnxConfigWithPast`] * Encoder-decoder models inherit from [`~onnx.config.OnnxSeq2SeqConfigWithPast`] ### OnnxConfig [[autodoc]] onnx.config.OnnxConfig ### OnnxConfigWithPast [[autodoc]] onnx.config.OnnxConfigWithPast ### OnnxSeq2SeqConfigWithPast [[autodoc]] onnx.config.OnnxSeq2SeqConfigWithPast ## ONNX Features Each ONNX configuration is associated with a set of _features_ that enable you to export models for different types of topologies or tasks. ### FeaturesManager [[autodoc]] onnx.features.FeaturesManager
transformers/docs/source/en/main_classes/onnx.md/0
{ "file_path": "transformers/docs/source/en/main_classes/onnx.md", "repo_id": "transformers", "token_count": 523 }
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Bamba ## Overview Bamba-9B is a decoder-only language model based on the [Mamba-2](https://github.com/state-spaces/mamba) architecture and is designed to handle a wide range of text generation tasks. It is trained from scratch using a two-stage training approach. In the first stage, the model is trained on 2 trillion tokens from the Dolma v1.7 dataset. In the second stage, it undergoes additional training on 200 billion tokens, leveraging a carefully curated blend of high-quality data to further refine its performance and enhance output quality. Checkout all Bamba-9B model checkpoints [here](https://github.com/foundation-model-stack/bamba). ## BambaConfig | Model | Params | # Layers | Hidden Dim. | Attention Heads | GQA | KV Heads | Context Length | Tied Embeddings | |-------------------|--------------|----------|-------------|-----------------|-----|----------|----------------|------------------| | Bamba | 9B (9.78B) | 32 | 4096 | 32 | Yes | 8 | 4096 | True | [[autodoc]] BambaConfig <!--- ## Usage Tips Tips: - The architecture is based on Mamba-2 models. ## BambaModel [[autodoc]] BambaModel - forward --> ## BambaForCausalLM ```python from transformers import AutoModelForCausalLM, AutoTokenizer model = AutoModelForCausalLM.from_pretrained("ibm-fms/Bamba-9B") tokenizer = AutoTokenizer.from_pretrained("ibm-fms/Bamba-9B") message = ["Mamba is a snake with following properties "] inputs = tokenizer(message, return_tensors='pt', return_token_type_ids=False) response = model.generate(**inputs, max_new_tokens=64) print(tokenizer.batch_decode(response, skip_special_tokens=True)[0]) ``` [[autodoc]] BambaForCausalLM - forward This HF implementation is contributed by [ani300](https://github.com/ani300) and [fabianlim](https://github.com/fabianlim).
transformers/docs/source/en/model_doc/bamba.md/0
{ "file_path": "transformers/docs/source/en/model_doc/bamba.md", "repo_id": "transformers", "token_count": 796 }
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # DeBERTa-v2 ## Overview The DeBERTa model was proposed in [DeBERTa: Decoding-enhanced BERT with Disentangled Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen It is based on Google's BERT model released in 2018 and Facebook's RoBERTa model released in 2019. It builds on RoBERTa with disentangled attention and enhanced mask decoder training with half of the data used in RoBERTa. The abstract from the paper is the following: *Recent progress in pre-trained neural language models has significantly improved the performance of many natural language processing (NLP) tasks. In this paper we propose a new model architecture DeBERTa (Decoding-enhanced BERT with disentangled attention) that improves the BERT and RoBERTa models using two novel techniques. The first is the disentangled attention mechanism, where each word is represented using two vectors that encode its content and position, respectively, and the attention weights among words are computed using disentangled matrices on their contents and relative positions. Second, an enhanced mask decoder is used to replace the output softmax layer to predict the masked tokens for model pretraining. We show that these two techniques significantly improve the efficiency of model pretraining and performance of downstream tasks. Compared to RoBERTa-Large, a DeBERTa model trained on half of the training data performs consistently better on a wide range of NLP tasks, achieving improvements on MNLI by +0.9% (90.2% vs. 91.1%), on SQuAD v2.0 by +2.3% (88.4% vs. 90.7%) and RACE by +3.6% (83.2% vs. 86.8%). The DeBERTa code and pre-trained models will be made publicly available at https://github.com/microsoft/DeBERTa.* The following information is visible directly on the [original implementation repository](https://github.com/microsoft/DeBERTa). DeBERTa v2 is the second version of the DeBERTa model. It includes the 1.5B model used for the SuperGLUE single-model submission and achieving 89.9, versus human baseline 89.8. You can find more details about this submission in the authors' [blog](https://www.microsoft.com/en-us/research/blog/microsoft-deberta-surpasses-human-performance-on-the-superglue-benchmark/) New in v2: - **Vocabulary** In v2 the tokenizer is changed to use a new vocabulary of size 128K built from the training data. Instead of a GPT2-based tokenizer, the tokenizer is now [sentencepiece-based](https://github.com/google/sentencepiece) tokenizer. - **nGiE(nGram Induced Input Encoding)** The DeBERTa-v2 model uses an additional convolution layer aside with the first transformer layer to better learn the local dependency of input tokens. 
- **Sharing position projection matrix with content projection matrix in attention layer** Based on previous experiments, this can save parameters without affecting the performance. - **Apply bucket to encode relative positions** The DeBERTa-v2 model uses log bucket to encode relative positions similar to T5. - **900M model & 1.5B model** Two additional model sizes are available: 900M and 1.5B, which significantly improves the performance of downstream tasks. This model was contributed by [DeBERTa](https://huggingface.co/DeBERTa). This model TF 2.0 implementation was contributed by [kamalkraj](https://huggingface.co/kamalkraj). The original code can be found [here](https://github.com/microsoft/DeBERTa). ## Resources - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) - [Question answering task guide](../tasks/question_answering) - [Masked language modeling task guide](../tasks/masked_language_modeling) - [Multiple choice task guide](../tasks/multiple_choice) ## DebertaV2Config [[autodoc]] DebertaV2Config ## DebertaV2Tokenizer [[autodoc]] DebertaV2Tokenizer - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences - save_vocabulary ## DebertaV2TokenizerFast [[autodoc]] DebertaV2TokenizerFast - build_inputs_with_special_tokens - create_token_type_ids_from_sequences <frameworkcontent> <pt> ## DebertaV2Model [[autodoc]] DebertaV2Model - forward ## DebertaV2PreTrainedModel [[autodoc]] DebertaV2PreTrainedModel - forward ## DebertaV2ForMaskedLM [[autodoc]] DebertaV2ForMaskedLM - forward ## DebertaV2ForSequenceClassification [[autodoc]] DebertaV2ForSequenceClassification - forward ## DebertaV2ForTokenClassification [[autodoc]] DebertaV2ForTokenClassification - forward ## DebertaV2ForQuestionAnswering [[autodoc]] DebertaV2ForQuestionAnswering - forward ## DebertaV2ForMultipleChoice [[autodoc]] DebertaV2ForMultipleChoice - forward </pt> <tf> ## TFDebertaV2Model [[autodoc]] TFDebertaV2Model - call ## TFDebertaV2PreTrainedModel [[autodoc]] TFDebertaV2PreTrainedModel - call ## TFDebertaV2ForMaskedLM [[autodoc]] TFDebertaV2ForMaskedLM - call ## TFDebertaV2ForSequenceClassification [[autodoc]] TFDebertaV2ForSequenceClassification - call ## TFDebertaV2ForTokenClassification [[autodoc]] TFDebertaV2ForTokenClassification - call ## TFDebertaV2ForQuestionAnswering [[autodoc]] TFDebertaV2ForQuestionAnswering - call ## TFDebertaV2ForMultipleChoice [[autodoc]] TFDebertaV2ForMultipleChoice - call </tf> </frameworkcontent>
transformers/docs/source/en/model_doc/deberta-v2.md/0
{ "file_path": "transformers/docs/source/en/model_doc/deberta-v2.md", "repo_id": "transformers", "token_count": 1846 }
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # DiT ## Overview DiT was proposed in [DiT: Self-supervised Pre-training for Document Image Transformer](https://arxiv.org/abs/2203.02378) by Junlong Li, Yiheng Xu, Tengchao Lv, Lei Cui, Cha Zhang, Furu Wei. DiT applies the self-supervised objective of [BEiT](beit) (BERT pre-training of Image Transformers) to 42 million document images, allowing for state-of-the-art results on tasks including: - document image classification: the [RVL-CDIP](https://www.cs.cmu.edu/~aharley/rvl-cdip/) dataset (a collection of 400,000 images belonging to one of 16 classes). - document layout analysis: the [PubLayNet](https://github.com/ibm-aur-nlp/PubLayNet) dataset (a collection of more than 360,000 document images constructed by automatically parsing PubMed XML files). - table detection: the [ICDAR 2019 cTDaR](https://github.com/cndplab-founder/ICDAR2019_cTDaR) dataset (a collection of 600 training images and 240 testing images). The abstract from the paper is the following: *Image Transformer has recently achieved significant progress for natural image understanding, either using supervised (ViT, DeiT, etc.) or self-supervised (BEiT, MAE, etc.) pre-training techniques. In this paper, we propose DiT, a self-supervised pre-trained Document Image Transformer model using large-scale unlabeled text images for Document AI tasks, which is essential since no supervised counterparts ever exist due to the lack of human labeled document images. We leverage DiT as the backbone network in a variety of vision-based Document AI tasks, including document image classification, document layout analysis, as well as table detection. Experiment results have illustrated that the self-supervised pre-trained DiT model achieves new state-of-the-art results on these downstream tasks, e.g. document image classification (91.11 → 92.69), document layout analysis (91.0 → 94.9) and table detection (94.23 → 96.55). * <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/dit_architecture.jpg" alt="drawing" width="600"/> <small> Summary of the approach. Taken from the [original paper](https://arxiv.org/abs/2203.02378). </small> This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/microsoft/unilm/tree/master/dit). ## Usage tips One can directly use the weights of DiT with the AutoModel API: ```python from transformers import AutoModel model = AutoModel.from_pretrained("microsoft/dit-base") ``` This will load the model pre-trained on masked image modeling. Note that this won't include the language modeling head on top, used to predict visual tokens. 
To include the head, you can load the weights into a `BeitForMaskedImageModeling` model, like so: ```python from transformers import BeitForMaskedImageModeling model = BeitForMaskedImageModeling.from_pretrained("microsoft/dit-base") ``` You can also load a fine-tuned model from the [hub](https://huggingface.co/models?other=dit), like so: ```python from transformers import AutoModelForImageClassification model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip") ``` This particular checkpoint was fine-tuned on [RVL-CDIP](https://www.cs.cmu.edu/~aharley/rvl-cdip/), an important benchmark for document image classification. A notebook that illustrates inference for document image classification can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/DiT/Inference_with_DiT_(Document_Image_Transformer)_for_document_image_classification.ipynb). ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with DiT. <PipelineTag pipeline="image-classification"/> - [`BeitForImageClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb). If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource. <Tip> As DiT's architecture is equivalent to that of BEiT, one can refer to [BEiT's documentation page](beit) for all tips, code examples and notebooks. </Tip>
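For a quick inference sketch with the fine-tuned document classification checkpoint mentioned above (the image path is only a placeholder):

```python
from transformers import pipeline

# classify a document image with the RVL-CDIP fine-tuned checkpoint
classifier = pipeline("image-classification", model="microsoft/dit-base-finetuned-rvlcdip")
predictions = classifier("path/to/your/document.png")
print(predictions)
```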
transformers/docs/source/en/model_doc/dit.md/0
{ "file_path": "transformers/docs/source/en/model_doc/dit.md", "repo_id": "transformers", "token_count": 1429 }
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # GPT-Sw3 ## Overview The GPT-Sw3 model was first proposed in [Lessons Learned from GPT-SW3: Building the First Large-Scale Generative Language Model for Swedish](http://www.lrec-conf.org/proceedings/lrec2022/pdf/2022.lrec-1.376.pdf) by Ariel Ekgren, Amaru Cuba Gyllensten, Evangelia Gogoulou, Alice Heiman, Severine Verlinden, Joey Öhman, Fredrik Carlsson, Magnus Sahlgren. Since that first paper the authors have extended their work and trained new models on their new 1.2TB corpora named The Nordic Pile. GPT-Sw3 is a collection of large decoder-only pretrained transformer language models that were developed by AI Sweden in collaboration with RISE and the WASP WARA for Media and Language. GPT-Sw3 has been trained on a dataset containing 320B tokens in Swedish, Norwegian, Danish, Icelandic, English, and programming code. The model was pretrained using a causal language modeling (CLM) objective utilizing the NeMo Megatron GPT implementation. This model was contributed by [AI Sweden Models](https://huggingface.co/AI-Sweden-Models). ## Usage example ```python >>> from transformers import AutoTokenizer, AutoModelForCausalLM >>> tokenizer = AutoTokenizer.from_pretrained("AI-Sweden-Models/gpt-sw3-356m") >>> model = AutoModelForCausalLM.from_pretrained("AI-Sweden-Models/gpt-sw3-356m") >>> input_ids = tokenizer("Träd är fina för att", return_tensors="pt")["input_ids"] >>> generated_token_ids = model.generate(inputs=input_ids, max_new_tokens=10, do_sample=True)[0] >>> print(tokenizer.decode(generated_token_ids)) Träd är fina för att de är färgstarka. Men ibland är det fint ``` ## Resources - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) - [Causal language modeling task guide](../tasks/language_modeling) <Tip> The implementation uses the `GPT2Model` coupled with our `GPTSw3Tokenizer`. Refer to [GPT2Model documentation](gpt2) for API reference and examples. Note that sentencepiece is required to use our tokenizer and can be installed with `pip install transformers[sentencepiece]` or `pip install sentencepiece` </Tip> ## GPTSw3Tokenizer [[autodoc]] GPTSw3Tokenizer - save_vocabulary
transformers/docs/source/en/model_doc/gpt-sw3.md/0
{ "file_path": "transformers/docs/source/en/model_doc/gpt-sw3.md", "repo_id": "transformers", "token_count": 879 }
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # LayoutLMV2 ## Overview The LayoutLMV2 model was proposed in [LayoutLMv2: Multi-modal Pre-training for Visually-Rich Document Understanding](https://arxiv.org/abs/2012.14740) by Yang Xu, Yiheng Xu, Tengchao Lv, Lei Cui, Furu Wei, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Wanxiang Che, Min Zhang, Lidong Zhou. LayoutLMV2 improves [LayoutLM](layoutlm) to obtain state-of-the-art results across several document image understanding benchmarks: - information extraction from scanned documents: the [FUNSD](https://guillaumejaume.github.io/FUNSD/) dataset (a collection of 199 annotated forms comprising more than 30,000 words), the [CORD](https://github.com/clovaai/cord) dataset (a collection of 800 receipts for training, 100 for validation and 100 for testing), the [SROIE](https://rrc.cvc.uab.es/?ch=13) dataset (a collection of 626 receipts for training and 347 receipts for testing) and the [Kleister-NDA](https://github.com/applicaai/kleister-nda) dataset (a collection of non-disclosure agreements from the EDGAR database, including 254 documents for training, 83 documents for validation, and 203 documents for testing). - document image classification: the [RVL-CDIP](https://www.cs.cmu.edu/~aharley/rvl-cdip/) dataset (a collection of 400,000 images belonging to one of 16 classes). - document visual question answering: the [DocVQA](https://arxiv.org/abs/2007.00398) dataset (a collection of 50,000 questions defined on 12,000+ document images). The abstract from the paper is the following: *Pre-training of text and layout has proved effective in a variety of visually-rich document understanding tasks due to its effective model architecture and the advantage of large-scale unlabeled scanned/digital-born documents. In this paper, we present LayoutLMv2 by pre-training text, layout and image in a multi-modal framework, where new model architectures and pre-training tasks are leveraged. Specifically, LayoutLMv2 not only uses the existing masked visual-language modeling task but also the new text-image alignment and text-image matching tasks in the pre-training stage, where cross-modality interaction is better learned. Meanwhile, it also integrates a spatial-aware self-attention mechanism into the Transformer architecture, so that the model can fully understand the relative positional relationship among different text blocks. Experiment results show that LayoutLMv2 outperforms strong baselines and achieves new state-of-the-art results on a wide variety of downstream visually-rich document understanding tasks, including FUNSD (0.7895 -> 0.8420), CORD (0.9493 -> 0.9601), SROIE (0.9524 -> 0.9781), Kleister-NDA (0.834 -> 0.852), RVL-CDIP (0.9443 -> 0.9564), and DocVQA (0.7295 -> 0.8672). 
The pre-trained LayoutLMv2 model is publicly available at this https URL.* LayoutLMv2 depends on `detectron2`, `torchvision` and `tesseract`. Run the following to install them: ```bash python -m pip install 'git+https://github.com/facebookresearch/detectron2.git' python -m pip install torchvision tesseract ``` (If you are developing for LayoutLMv2, note that passing the doctests also requires the installation of these packages.) ## Usage tips - The main difference between LayoutLMv1 and LayoutLMv2 is that the latter incorporates visual embeddings during pre-training (while LayoutLMv1 only adds visual embeddings during fine-tuning). - LayoutLMv2 adds both a relative 1D attention bias as well as a spatial 2D attention bias to the attention scores in the self-attention layers. Details can be found on page 5 of the [paper](https://arxiv.org/abs/2012.14740). - Demo notebooks on how to use the LayoutLMv2 model on RVL-CDIP, FUNSD, DocVQA, CORD can be found [here](https://github.com/NielsRogge/Transformers-Tutorials). - LayoutLMv2 uses Facebook AI's [Detectron2](https://github.com/facebookresearch/detectron2/) package for its visual backbone. See [this link](https://detectron2.readthedocs.io/en/latest/tutorials/install.html) for installation instructions. - In addition to `input_ids`, [`~LayoutLMv2Model.forward`] expects 2 additional inputs, namely `image` and `bbox`. The `image` input corresponds to the original document image in which the text tokens occur. The model expects each document image to be of size 224x224. This means that if you have a batch of document images, `image` should be a tensor of shape (batch_size, 3, 224, 224). This can be either a `torch.Tensor` or a `Detectron2.structures.ImageList`. You don't need to normalize the channels, as this is done by the model. Important to note is that the visual backbone expects BGR channels instead of RGB, as all models in Detectron2 are pre-trained using the BGR format. The `bbox` input are the bounding boxes (i.e. 2D-positions) of the input text tokens. This is identical to [`LayoutLMModel`]. These can be obtained using an external OCR engine such as Google's [Tesseract](https://github.com/tesseract-ocr/tesseract) (there's a [Python wrapper](https://pypi.org/project/pytesseract/) available). Each bounding box should be in (x0, y0, x1, y1) format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1, y1) represents the position of the lower right corner. Note that one first needs to normalize the bounding boxes to be on a 0-1000 scale. To normalize, you can use the following function: ```python def normalize_bbox(bbox, width, height): return [ int(1000 * (bbox[0] / width)), int(1000 * (bbox[1] / height)), int(1000 * (bbox[2] / width)), int(1000 * (bbox[3] / height)), ] ``` Here, `width` and `height` correspond to the width and height of the original document in which the token occurs (before resizing the image). Those can be obtained using the Python Image Library (PIL) library for example, as follows: ```python from PIL import Image image = Image.open( "name_of_your_document - can be a png, jpg, etc. of your documents (PDFs must be converted to images)." ) width, height = image.size ``` However, this model includes a brand new [`~transformers.LayoutLMv2Processor`] which can be used to directly prepare data for the model (including applying OCR under the hood). More information can be found in the "Usage" section below. 
- Internally, [`~transformers.LayoutLMv2Model`] will send the `image` input through its visual backbone to obtain a lower-resolution feature map, whose shape is equal to the `image_feature_pool_shape` attribute of [`~transformers.LayoutLMv2Config`]. This feature map is then flattened to obtain a sequence of image tokens. As the size of the feature map is 7x7 by default, one obtains 49 image tokens. These are then concatenated with the text tokens, and send through the Transformer encoder. This means that the last hidden states of the model will have a length of 512 + 49 = 561, if you pad the text tokens up to the max length. More generally, the last hidden states will have a shape of `seq_length` + `image_feature_pool_shape[0]` * `config.image_feature_pool_shape[1]`. - When calling [`~transformers.LayoutLMv2Model.from_pretrained`], a warning will be printed with a long list of parameter names that are not initialized. This is not a problem, as these parameters are batch normalization statistics, which are going to have values when fine-tuning on a custom dataset. - If you want to train the model in a distributed environment, make sure to call [`synchronize_batch_norm`] on the model in order to properly synchronize the batch normalization layers of the visual backbone. In addition, there's LayoutXLM, which is a multilingual version of LayoutLMv2. More information can be found on [LayoutXLM's documentation page](layoutxlm). ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with LayoutLMv2. If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource. <PipelineTag pipeline="text-classification"/> - A notebook on how to [finetune LayoutLMv2 for text-classification on RVL-CDIP dataset](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/LayoutLMv2/RVL-CDIP/Fine_tuning_LayoutLMv2ForSequenceClassification_on_RVL_CDIP.ipynb). - See also: [Text classification task guide](../tasks/sequence_classification) <PipelineTag pipeline="question-answering"/> - A notebook on how to [finetune LayoutLMv2 for question-answering on DocVQA dataset](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/LayoutLMv2/DocVQA/Fine_tuning_LayoutLMv2ForQuestionAnswering_on_DocVQA.ipynb). - See also: [Question answering task guide](../tasks/question_answering) - See also: [Document question answering task guide](../tasks/document_question_answering) <PipelineTag pipeline="token-classification"/> - A notebook on how to [finetune LayoutLMv2 for token-classification on CORD dataset](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/LayoutLMv2/CORD/Fine_tuning_LayoutLMv2ForTokenClassification_on_CORD.ipynb). - A notebook on how to [finetune LayoutLMv2 for token-classification on FUNSD dataset](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/LayoutLMv2/FUNSD/Fine_tuning_LayoutLMv2ForTokenClassification_on_FUNSD_using_HuggingFace_Trainer.ipynb). 
- See also: [Token classification task guide](../tasks/token_classification) ## Usage: LayoutLMv2Processor The easiest way to prepare data for the model is to use [`LayoutLMv2Processor`], which internally combines a image processor ([`LayoutLMv2ImageProcessor`]) and a tokenizer ([`LayoutLMv2Tokenizer`] or [`LayoutLMv2TokenizerFast`]). The image processor handles the image modality, while the tokenizer handles the text modality. A processor combines both, which is ideal for a multi-modal model like LayoutLMv2. Note that you can still use both separately, if you only want to handle one modality. ```python from transformers import LayoutLMv2ImageProcessor, LayoutLMv2TokenizerFast, LayoutLMv2Processor image_processor = LayoutLMv2ImageProcessor() # apply_ocr is set to True by default tokenizer = LayoutLMv2TokenizerFast.from_pretrained("microsoft/layoutlmv2-base-uncased") processor = LayoutLMv2Processor(image_processor, tokenizer) ``` In short, one can provide a document image (and possibly additional data) to [`LayoutLMv2Processor`], and it will create the inputs expected by the model. Internally, the processor first uses [`LayoutLMv2ImageProcessor`] to apply OCR on the image to get a list of words and normalized bounding boxes, as well to resize the image to a given size in order to get the `image` input. The words and normalized bounding boxes are then provided to [`LayoutLMv2Tokenizer`] or [`LayoutLMv2TokenizerFast`], which converts them to token-level `input_ids`, `attention_mask`, `token_type_ids`, `bbox`. Optionally, one can provide word labels to the processor, which are turned into token-level `labels`. [`LayoutLMv2Processor`] uses [PyTesseract](https://pypi.org/project/pytesseract/), a Python wrapper around Google's Tesseract OCR engine, under the hood. Note that you can still use your own OCR engine of choice, and provide the words and normalized boxes yourself. This requires initializing [`LayoutLMv2ImageProcessor`] with `apply_ocr` set to `False`. In total, there are 5 use cases that are supported by the processor. Below, we list them all. Note that each of these use cases work for both batched and non-batched inputs (we illustrate them for non-batched inputs). **Use case 1: document image classification (training, inference) + token classification (inference), apply_ocr = True** This is the simplest case, in which the processor (actually the image processor) will perform OCR on the image to get the words and normalized bounding boxes. ```python from transformers import LayoutLMv2Processor from PIL import Image processor = LayoutLMv2Processor.from_pretrained("microsoft/layoutlmv2-base-uncased") image = Image.open( "name_of_your_document - can be a png, jpg, etc. of your documents (PDFs must be converted to images)." ).convert("RGB") encoding = processor( image, return_tensors="pt" ) # you can also add all tokenizer parameters here such as padding, truncation print(encoding.keys()) # dict_keys(['input_ids', 'token_type_ids', 'attention_mask', 'bbox', 'image']) ``` **Use case 2: document image classification (training, inference) + token classification (inference), apply_ocr=False** In case one wants to do OCR themselves, one can initialize the image processor with `apply_ocr` set to `False`. In that case, one should provide the words and corresponding (normalized) bounding boxes themselves to the processor. 
```python from transformers import LayoutLMv2Processor from PIL import Image processor = LayoutLMv2Processor.from_pretrained("microsoft/layoutlmv2-base-uncased", revision="no_ocr") image = Image.open( "name_of_your_document - can be a png, jpg, etc. of your documents (PDFs must be converted to images)." ).convert("RGB") words = ["hello", "world"] boxes = [[1, 2, 3, 4], [5, 6, 7, 8]] # make sure to normalize your bounding boxes encoding = processor(image, words, boxes=boxes, return_tensors="pt") print(encoding.keys()) # dict_keys(['input_ids', 'token_type_ids', 'attention_mask', 'bbox', 'image']) ``` **Use case 3: token classification (training), apply_ocr=False** For token classification tasks (such as FUNSD, CORD, SROIE, Kleister-NDA), one can also provide the corresponding word labels in order to train a model. The processor will then convert these into token-level `labels`. By default, it will only label the first wordpiece of a word, and label the remaining wordpieces with -100, which is the `ignore_index` of PyTorch's CrossEntropyLoss. In case you want all wordpieces of a word to be labeled, you can initialize the tokenizer with `only_label_first_subword` set to `False`. ```python from transformers import LayoutLMv2Processor from PIL import Image processor = LayoutLMv2Processor.from_pretrained("microsoft/layoutlmv2-base-uncased", revision="no_ocr") image = Image.open( "name_of_your_document - can be a png, jpg, etc. of your documents (PDFs must be converted to images)." ).convert("RGB") words = ["hello", "world"] boxes = [[1, 2, 3, 4], [5, 6, 7, 8]] # make sure to normalize your bounding boxes word_labels = [1, 2] encoding = processor(image, words, boxes=boxes, word_labels=word_labels, return_tensors="pt") print(encoding.keys()) # dict_keys(['input_ids', 'token_type_ids', 'attention_mask', 'bbox', 'labels', 'image']) ``` **Use case 4: visual question answering (inference), apply_ocr=True** For visual question answering tasks (such as DocVQA), you can provide a question to the processor. By default, the processor will apply OCR on the image, and create [CLS] question tokens [SEP] word tokens [SEP]. ```python from transformers import LayoutLMv2Processor from PIL import Image processor = LayoutLMv2Processor.from_pretrained("microsoft/layoutlmv2-base-uncased") image = Image.open( "name_of_your_document - can be a png, jpg, etc. of your documents (PDFs must be converted to images)." ).convert("RGB") question = "What's his name?" encoding = processor(image, question, return_tensors="pt") print(encoding.keys()) # dict_keys(['input_ids', 'token_type_ids', 'attention_mask', 'bbox', 'image']) ``` **Use case 5: visual question answering (inference), apply_ocr=False** For visual question answering tasks (such as DocVQA), you can provide a question to the processor. If you want to perform OCR yourself, you can provide your own words and (normalized) bounding boxes to the processor. ```python from transformers import LayoutLMv2Processor from PIL import Image processor = LayoutLMv2Processor.from_pretrained("microsoft/layoutlmv2-base-uncased", revision="no_ocr") image = Image.open( "name_of_your_document - can be a png, jpg, etc. of your documents (PDFs must be converted to images)." ).convert("RGB") question = "What's his name?" 
words = ["hello", "world"] boxes = [[1, 2, 3, 4], [5, 6, 7, 8]] # make sure to normalize your bounding boxes encoding = processor(image, question, words, boxes=boxes, return_tensors="pt") print(encoding.keys()) # dict_keys(['input_ids', 'token_type_ids', 'attention_mask', 'bbox', 'image']) ``` ## LayoutLMv2Config [[autodoc]] LayoutLMv2Config ## LayoutLMv2FeatureExtractor [[autodoc]] LayoutLMv2FeatureExtractor - __call__ ## LayoutLMv2ImageProcessor [[autodoc]] LayoutLMv2ImageProcessor - preprocess ## LayoutLMv2Tokenizer [[autodoc]] LayoutLMv2Tokenizer - __call__ - save_vocabulary ## LayoutLMv2TokenizerFast [[autodoc]] LayoutLMv2TokenizerFast - __call__ ## LayoutLMv2Processor [[autodoc]] LayoutLMv2Processor - __call__ ## LayoutLMv2Model [[autodoc]] LayoutLMv2Model - forward ## LayoutLMv2ForSequenceClassification [[autodoc]] LayoutLMv2ForSequenceClassification ## LayoutLMv2ForTokenClassification [[autodoc]] LayoutLMv2ForTokenClassification ## LayoutLMv2ForQuestionAnswering [[autodoc]] LayoutLMv2ForQuestionAnswering
transformers/docs/source/en/model_doc/layoutlmv2.md/0
{ "file_path": "transformers/docs/source/en/model_doc/layoutlmv2.md", "repo_id": "transformers", "token_count": 5361 }
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # LXMERT ## Overview The LXMERT model was proposed in [LXMERT: Learning Cross-Modality Encoder Representations from Transformers](https://arxiv.org/abs/1908.07490) by Hao Tan & Mohit Bansal. It is a series of bidirectional transformer encoders (one for the vision modality, one for the language modality, and then one to fuse both modalities) pretrained using a combination of masked language modeling, visual-language text alignment, ROI-feature regression, masked visual-attribute modeling, masked visual-object modeling, and visual-question answering objectives. The pretraining consists of multiple multi-modal datasets: MSCOCO, Visual-Genome + Visual-Genome Question Answering, VQA 2.0, and GQA. The abstract from the paper is the following: *Vision-and-language reasoning requires an understanding of visual concepts, language semantics, and, most importantly, the alignment and relationships between these two modalities. We thus propose the LXMERT (Learning Cross-Modality Encoder Representations from Transformers) framework to learn these vision-and-language connections. In LXMERT, we build a large-scale Transformer model that consists of three encoders: an object relationship encoder, a language encoder, and a cross-modality encoder. Next, to endow our model with the capability of connecting vision and language semantics, we pre-train the model with large amounts of image-and-sentence pairs, via five diverse representative pretraining tasks: masked language modeling, masked object prediction (feature regression and label classification), cross-modality matching, and image question answering. These tasks help in learning both intra-modality and cross-modality relationships. After fine-tuning from our pretrained parameters, our model achieves the state-of-the-art results on two visual question answering datasets (i.e., VQA and GQA). We also show the generalizability of our pretrained cross-modality model by adapting it to a challenging visual-reasoning task, NLVR, and improve the previous best result by 22% absolute (54% to 76%). Lastly, we demonstrate detailed ablation studies to prove that both our novel model components and pretraining strategies significantly contribute to our strong results; and also present several attention visualizations for the different encoders* This model was contributed by [eltoto1219](https://huggingface.co/eltoto1219). The original code can be found [here](https://github.com/airsplay/lxmert). ## Usage tips - Bounding boxes are not necessary to be used in the visual feature embeddings, any kind of visual-spacial features will work. - Both the language hidden states and the visual hidden states that LXMERT outputs are passed through the cross-modality layer, so they contain information from both modalities. 
To access a modality that only attends to itself, select the vision/language hidden states from the first input in the tuple. - The bidirectional cross-modality encoder attention only returns attention values when the language modality is used as the input and the vision modality is used as the context vector. Further, while the cross-modality encoder contains self-attention for each respective modality and cross-attention, only the cross attention is returned and both self attention outputs are disregarded. ## Resources - [Question answering task guide](../tasks/question_answering) ## LxmertConfig [[autodoc]] LxmertConfig ## LxmertTokenizer [[autodoc]] LxmertTokenizer ## LxmertTokenizerFast [[autodoc]] LxmertTokenizerFast ## Lxmert specific outputs [[autodoc]] models.lxmert.modeling_lxmert.LxmertModelOutput [[autodoc]] models.lxmert.modeling_lxmert.LxmertForPreTrainingOutput [[autodoc]] models.lxmert.modeling_lxmert.LxmertForQuestionAnsweringOutput [[autodoc]] models.lxmert.modeling_tf_lxmert.TFLxmertModelOutput [[autodoc]] models.lxmert.modeling_tf_lxmert.TFLxmertForPreTrainingOutput <frameworkcontent> <pt> ## LxmertModel [[autodoc]] LxmertModel - forward ## LxmertForPreTraining [[autodoc]] LxmertForPreTraining - forward ## LxmertForQuestionAnswering [[autodoc]] LxmertForQuestionAnswering - forward </pt> <tf> ## TFLxmertModel [[autodoc]] TFLxmertModel - call ## TFLxmertForPreTraining [[autodoc]] TFLxmertForPreTraining - call </tf> </frameworkcontent>
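Since LXMERT expects pre-extracted visual features in addition to text, the sketch below illustrates the expected input and output shapes. The visual features would normally come from an external object detector (e.g. a Faster R-CNN); the random tensors here are assumptions used purely for illustration.

```python
import torch
from transformers import LxmertModel, LxmertTokenizer

tokenizer = LxmertTokenizer.from_pretrained("unc-nlp/lxmert-base-uncased")
model = LxmertModel.from_pretrained("unc-nlp/lxmert-base-uncased")

inputs = tokenizer("What color is the cat?", return_tensors="pt")

# Visual features normally come from an external detector; random tensors only show the expected shapes
visual_feats = torch.rand(1, 36, 2048)  # (batch_size, num_regions, feature_dim)
visual_pos = torch.rand(1, 36, 4)       # (batch_size, num_regions, normalized box coordinates)

outputs = model(**inputs, visual_feats=visual_feats, visual_pos=visual_pos)

# Both outputs have passed through the cross-modality encoder
print(outputs.language_output.shape, outputs.vision_output.shape)
```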
transformers/docs/source/en/model_doc/lxmert.md/0
{ "file_path": "transformers/docs/source/en/model_doc/lxmert.md", "repo_id": "transformers", "token_count": 1392 }
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # RAG <div class="flex flex-wrap space-x-1"> <a href="https://huggingface.co/models?filter=rag"> <img alt="Models" src="https://img.shields.io/badge/All_model_pages-rag-blueviolet"> </a> </div> ## Overview Retrieval-augmented generation ("RAG") models combine the powers of pretrained dense retrieval (DPR) and sequence-to-sequence models. RAG models retrieve documents, pass them to a seq2seq model, then marginalize to generate outputs. The retriever and seq2seq modules are initialized from pretrained models, and fine-tuned jointly, allowing both retrieval and generation to adapt to downstream tasks. It is based on the paper [Retrieval-Augmented Generation for Knowledge-Intensive NLP Tasks](https://arxiv.org/abs/2005.11401) by Patrick Lewis, Ethan Perez, Aleksandara Piktus, Fabio Petroni, Vladimir Karpukhin, Naman Goyal, Heinrich Küttler, Mike Lewis, Wen-tau Yih, Tim Rocktäschel, Sebastian Riedel, Douwe Kiela. The abstract from the paper is the following: *Large pre-trained language models have been shown to store factual knowledge in their parameters, and achieve state-of-the-art results when fine-tuned on downstream NLP tasks. However, their ability to access and precisely manipulate knowledge is still limited, and hence on knowledge-intensive tasks, their performance lags behind task-specific architectures. Additionally, providing provenance for their decisions and updating their world knowledge remain open research problems. Pre-trained models with a differentiable access mechanism to explicit nonparametric memory can overcome this issue, but have so far been only investigated for extractive downstream tasks. We explore a general-purpose fine-tuning recipe for retrieval-augmented generation (RAG) — models which combine pre-trained parametric and non-parametric memory for language generation. We introduce RAG models where the parametric memory is a pre-trained seq2seq model and the non-parametric memory is a dense vector index of Wikipedia, accessed with a pre-trained neural retriever. We compare two RAG formulations, one which conditions on the same retrieved passages across the whole generated sequence, the other can use different passages per token. We fine-tune and evaluate our models on a wide range of knowledge-intensive NLP tasks and set the state-of-the-art on three open domain QA tasks, outperforming parametric seq2seq models and task-specific retrieve-and-extract architectures. For language generation tasks, we find that RAG models generate more specific, diverse and factual language than a state-of-the-art parametric-only seq2seq baseline.* This model was contributed by [ola13](https://huggingface.co/ola13). ## Usage tips Retrieval-augmented generation ("RAG") models combine the powers of pretrained dense retrieval (DPR) and Seq2Seq models. 
RAG models retrieve docs, pass them to a seq2seq model, then marginalize to generate outputs. The retriever and seq2seq modules are initialized from pretrained models, and fine-tuned jointly, allowing both retrieval and generation to adapt to downstream tasks. ## RagConfig [[autodoc]] RagConfig ## RagTokenizer [[autodoc]] RagTokenizer ## Rag specific outputs [[autodoc]] models.rag.modeling_rag.RetrievAugLMMarginOutput [[autodoc]] models.rag.modeling_rag.RetrievAugLMOutput ## RagRetriever [[autodoc]] RagRetriever <frameworkcontent> <pt> ## RagModel [[autodoc]] RagModel - forward ## RagSequenceForGeneration [[autodoc]] RagSequenceForGeneration - forward - generate ## RagTokenForGeneration [[autodoc]] RagTokenForGeneration - forward - generate </pt> <tf> ## TFRagModel [[autodoc]] TFRagModel - call ## TFRagSequenceForGeneration [[autodoc]] TFRagSequenceForGeneration - call - generate ## TFRagTokenForGeneration [[autodoc]] TFRagTokenForGeneration - call - generate </tf> </frameworkcontent>
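For reference, a minimal end-to-end sketch of retrieval-augmented generation with a pretrained RAG checkpoint is shown below. It assumes the optional `datasets` and `faiss-cpu` packages are installed for the retriever, and it uses the dummy retrieval index (`use_dummy_dataset=True`) to keep the download small; for real use cases you would load the full Wikipedia index instead.

```python
from transformers import RagTokenizer, RagRetriever, RagSequenceForGeneration

tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
retriever = RagRetriever.from_pretrained(
    "facebook/rag-sequence-nq", index_name="exact", use_dummy_dataset=True
)
model = RagSequenceForGeneration.from_pretrained("facebook/rag-sequence-nq", retriever=retriever)

# Encode a question, retrieve supporting passages, and generate an answer
inputs = tokenizer("who holds the record in 100m freestyle", return_tensors="pt")
generated_ids = model.generate(input_ids=inputs["input_ids"])
print(tokenizer.batch_decode(generated_ids, skip_special_tokens=True))
```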
transformers/docs/source/en/model_doc/rag.md/0
{ "file_path": "transformers/docs/source/en/model_doc/rag.md", "repo_id": "transformers", "token_count": 1273 }
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the MIT License; you may not use this file except in compliance with the License. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # SuperPoint ## Overview The SuperPoint model was proposed in [SuperPoint: Self-Supervised Interest Point Detection and Description](https://arxiv.org/abs/1712.07629) by Daniel DeTone, Tomasz Malisiewicz and Andrew Rabinovich. This model is the result of a self-supervised training of a fully-convolutional network for interest point detection and description. The model is able to detect interest points that are repeatable under homographic transformations and provide a descriptor for each point. The use of the model in its own is limited, but it can be used as a feature extractor for other tasks such as homography estimation, image matching, etc. The abstract from the paper is the following: *This paper presents a self-supervised framework for training interest point detectors and descriptors suitable for a large number of multiple-view geometry problems in computer vision. As opposed to patch-based neural networks, our fully-convolutional model operates on full-sized images and jointly computes pixel-level interest point locations and associated descriptors in one forward pass. We introduce Homographic Adaptation, a multi-scale, multi-homography approach for boosting interest point detection repeatability and performing cross-domain adaptation (e.g., synthetic-to-real). Our model, when trained on the MS-COCO generic image dataset using Homographic Adaptation, is able to repeatedly detect a much richer set of interest points than the initial pre-adapted deep model and any other traditional corner detector. The final system gives rise to state-of-the-art homography estimation results on HPatches when compared to LIFT, SIFT and ORB.* <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/superpoint_architecture.png" alt="drawing" width="500"/> <small> SuperPoint overview. Taken from the <a href="https://arxiv.org/abs/1712.07629v4">original paper.</a> </small> ## Usage tips Here is a quick example of using the model to detect interest points in an image: ```python from transformers import AutoImageProcessor, SuperPointForKeypointDetection import torch from PIL import Image import requests url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) processor = AutoImageProcessor.from_pretrained("magic-leap-community/superpoint") model = SuperPointForKeypointDetection.from_pretrained("magic-leap-community/superpoint") inputs = processor(image, return_tensors="pt") outputs = model(**inputs) ``` The outputs contain the list of keypoint coordinates with their respective score and description (a 256-long vector). You can also feed multiple images to the model. 
Due to the nature of SuperPoint, to output a dynamic number of keypoints, you will need to use the mask attribute to retrieve the respective information : ```python from transformers import AutoImageProcessor, SuperPointForKeypointDetection import torch from PIL import Image import requests url_image_1 = "http://images.cocodataset.org/val2017/000000039769.jpg" image_1 = Image.open(requests.get(url_image_1, stream=True).raw) url_image_2 = "http://images.cocodataset.org/test-stuff2017/000000000568.jpg" image_2 = Image.open(requests.get(url_image_2, stream=True).raw) images = [image_1, image_2] processor = AutoImageProcessor.from_pretrained("magic-leap-community/superpoint") model = SuperPointForKeypointDetection.from_pretrained("magic-leap-community/superpoint") inputs = processor(images, return_tensors="pt") outputs = model(**inputs) image_sizes = [(image.height, image.width) for image in images] outputs = processor.post_process_keypoint_detection(outputs, image_sizes) for output in outputs: for keypoints, scores, descriptors in zip(output["keypoints"], output["scores"], output["descriptors"]): print(f"Keypoints: {keypoints}") print(f"Scores: {scores}") print(f"Descriptors: {descriptors}") ``` You can then print the keypoints on the image of your choice to visualize the result: ```python import matplotlib.pyplot as plt plt.axis("off") plt.imshow(image_1) plt.scatter( outputs[0]["keypoints"][:, 0], outputs[0]["keypoints"][:, 1], c=outputs[0]["scores"] * 100, s=outputs[0]["scores"] * 50, alpha=0.8 ) plt.savefig(f"output_image.png") ``` ![image/png](https://cdn-uploads.huggingface.co/production/uploads/632885ba1558dac67c440aa8/ZtFmphEhx8tcbEQqOolyE.png) This model was contributed by [stevenbucaille](https://huggingface.co/stevenbucaille). The original code can be found [here](https://github.com/magicleap/SuperPointPretrainedNetwork). ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with SuperPoint. If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource. - A notebook showcasing inference and visualization with SuperPoint can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/SuperPoint/Inference_with_SuperPoint_to_detect_interest_points_in_an_image.ipynb). 🌎 ## SuperPointConfig [[autodoc]] SuperPointConfig ## SuperPointImageProcessor [[autodoc]] SuperPointImageProcessor - preprocess - post_process_keypoint_detection ## SuperPointForKeypointDetection [[autodoc]] SuperPointForKeypointDetection - forward
transformers/docs/source/en/model_doc/superpoint.md/0
{ "file_path": "transformers/docs/source/en/model_doc/superpoint.md", "repo_id": "transformers", "token_count": 1769 }
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Transformer XL <Tip warning={true}> This model is in maintenance mode only, so we won't accept any new PRs changing its code. This model was deprecated due to security issues linked to `pickle.load`. We recommend switching to more recent models for improved security. In case you would still like to use `TransfoXL` in your experiments, we recommend using the [Hub checkpoint](https://huggingface.co/transfo-xl/transfo-xl-wt103) with a specific revision to ensure you are downloading safe files from the Hub. You will need to set the environment variable `TRUST_REMOTE_CODE` to `True` in order to allow the usage of `pickle.load()`: ```python import os from transformers import TransfoXLTokenizer, TransfoXLLMHeadModel os.environ["TRUST_REMOTE_CODE"] = "True" checkpoint = 'transfo-xl/transfo-xl-wt103' revision = '40a186da79458c9f9de846edfaea79c412137f97' tokenizer = TransfoXLTokenizer.from_pretrained(checkpoint, revision=revision) model = TransfoXLLMHeadModel.from_pretrained(checkpoint, revision=revision) ``` If you run into any issues running this model, please reinstall the last version that supported this model: v4.35.0. You can do so by running the following command: `pip install -U transformers==4.35.0`. </Tip> <div class="flex flex-wrap space-x-1"> <a href="https://huggingface.co/models?filter=transfo-xl"> <img alt="Models" src="https://img.shields.io/badge/All_model_pages-transfo--xl-blueviolet"> </a> <a href="https://huggingface.co/spaces/docs-demos/transfo-xl-wt103"> <img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue"> </a> </div> ## Overview The Transformer-XL model was proposed in [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) by Zihang Dai, Zhilin Yang, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov. It's a causal (uni-directional) transformer with relative positioning (sinusoïdal) embeddings which can reuse previously computed hidden-states to attend to longer context (memory). This model also uses adaptive softmax inputs and outputs (tied). The abstract from the paper is the following: *Transformers have a potential of learning longer-term dependency, but are limited by a fixed-length context in the setting of language modeling. We propose a novel neural architecture Transformer-XL that enables learning dependency beyond a fixed length without disrupting temporal coherence. It consists of a segment-level recurrence mechanism and a novel positional encoding scheme. Our method not only enables capturing longer-term dependency, but also resolves the context fragmentation problem. 
As a result, Transformer-XL learns dependency that is 80% longer than RNNs and 450% longer than vanilla Transformers, achieves better performance on both short and long sequences, and is up to 1,800+ times faster than vanilla Transformers during evaluation. Notably, we improve the state-of-the-art results of bpc/perplexity to 0.99 on enwiki8, 1.08 on text8, 18.3 on WikiText-103, 21.8 on One Billion Word, and 54.5 on Penn Treebank (without finetuning). When trained only on WikiText-103, Transformer-XL manages to generate reasonably coherent, novel text articles with thousands of tokens.* This model was contributed by [thomwolf](https://huggingface.co/thomwolf). The original code can be found [here](https://github.com/kimiyoung/transformer-xl). ## Usage tips - Transformer-XL uses relative sinusoidal positional embeddings. Padding can be done on the left or on the right. The original implementation trains on SQuAD with padding on the left, therefore the padding defaults are set to left. - Transformer-XL is one of the few models that has no sequence length limit. - Same as a regular GPT model, but introduces a recurrence mechanism for two consecutive segments (similar to a regular RNNs with two consecutive inputs). In this context, a segment is a number of consecutive tokens (for instance 512) that may span across multiple documents, and segments are fed in order to the model. - Basically, the hidden states of the previous segment are concatenated to the current input to compute the attention scores. This allows the model to pay attention to information that was in the previous segment as well as the current one. By stacking multiple attention layers, the receptive field can be increased to multiple previous segments. - This changes the positional embeddings to positional relative embeddings (as the regular positional embeddings would give the same results in the current input and the current hidden state at a given position) and needs to make some adjustments in the way attention scores are computed. <Tip warning={true}> TransformerXL does **not** work with *torch.nn.DataParallel* due to a bug in PyTorch, see [issue #36035](https://github.com/pytorch/pytorch/issues/36035) </Tip> ## Resources - [Text classification task guide](../tasks/sequence_classification) - [Causal language modeling task guide](../tasks/language_modeling) ## TransfoXLConfig [[autodoc]] TransfoXLConfig ## TransfoXLTokenizer [[autodoc]] TransfoXLTokenizer - save_vocabulary ## TransfoXL specific outputs [[autodoc]] models.deprecated.transfo_xl.modeling_transfo_xl.TransfoXLModelOutput [[autodoc]] models.deprecated.transfo_xl.modeling_transfo_xl.TransfoXLLMHeadModelOutput [[autodoc]] models.deprecated.transfo_xl.modeling_tf_transfo_xl.TFTransfoXLModelOutput [[autodoc]] models.deprecated.transfo_xl.modeling_tf_transfo_xl.TFTransfoXLLMHeadModelOutput <frameworkcontent> <pt> ## TransfoXLModel [[autodoc]] TransfoXLModel - forward ## TransfoXLLMHeadModel [[autodoc]] TransfoXLLMHeadModel - forward ## TransfoXLForSequenceClassification [[autodoc]] TransfoXLForSequenceClassification - forward </pt> <tf> ## TFTransfoXLModel [[autodoc]] TFTransfoXLModel - call ## TFTransfoXLLMHeadModel [[autodoc]] TFTransfoXLLMHeadModel - call ## TFTransfoXLForSequenceClassification [[autodoc]] TFTransfoXLForSequenceClassification - call </tf> </frameworkcontent> ## Internal Layers [[autodoc]] AdaptiveEmbedding [[autodoc]] TFAdaptiveEmbedding
transformers/docs/source/en/model_doc/transfo-xl.md/0
{ "file_path": "transformers/docs/source/en/model_doc/transfo-xl.md", "repo_id": "transformers", "token_count": 2000 }
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Vision Encoder Decoder Models ## Overview The [`VisionEncoderDecoderModel`] can be used to initialize an image-to-text model with any pretrained Transformer-based vision model as the encoder (*e.g.* [ViT](vit), [BEiT](beit), [DeiT](deit), [Swin](swin)) and any pretrained language model as the decoder (*e.g.* [RoBERTa](roberta), [GPT2](gpt2), [BERT](bert), [DistilBERT](distilbert)). The effectiveness of initializing image-to-text-sequence models with pretrained checkpoints has been shown in (for example) [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei. After such a [`VisionEncoderDecoderModel`] has been trained/fine-tuned, it can be saved/loaded just like any other models (see the examples below for more information). An example application is image captioning, in which the encoder is used to encode the image, after which an autoregressive language model generates the caption. Another example is optical character recognition. Refer to [TrOCR](trocr), which is an instance of [`VisionEncoderDecoderModel`]. ## Randomly initializing `VisionEncoderDecoderModel` from model configurations. [`VisionEncoderDecoderModel`] can be randomly initialized from an encoder and a decoder config. In the following example, we show how to do this using the default [`ViTModel`] configuration for the encoder and the default [`BertForCausalLM`] configuration for the decoder. ```python >>> from transformers import BertConfig, ViTConfig, VisionEncoderDecoderConfig, VisionEncoderDecoderModel >>> config_encoder = ViTConfig() >>> config_decoder = BertConfig() >>> config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(config_encoder, config_decoder) >>> model = VisionEncoderDecoderModel(config=config) ``` ## Initialising `VisionEncoderDecoderModel` from a pretrained encoder and a pretrained decoder. [`VisionEncoderDecoderModel`] can be initialized from a pretrained encoder checkpoint and a pretrained decoder checkpoint. Note that any pretrained Transformer-based vision model, *e.g.* [Swin](swin), can serve as the encoder and both pretrained auto-encoding models, *e.g.* BERT, pretrained causal language models, *e.g.* GPT2, as well as the pretrained decoder part of sequence-to-sequence models, *e.g.* decoder of BART, can be used as the decoder. Depending on which architecture you choose as the decoder, the cross-attention layers might be randomly initialized. 
Initializing [`VisionEncoderDecoderModel`] from a pretrained encoder and decoder checkpoint requires the model to be fine-tuned on a downstream task, as has been shown in [the *Warm-starting-encoder-decoder blog post*](https://huggingface.co/blog/warm-starting-encoder-decoder). To do so, the `VisionEncoderDecoderModel` class provides a [`VisionEncoderDecoderModel.from_encoder_decoder_pretrained`] method. ```python >>> from transformers import VisionEncoderDecoderModel >>> model = VisionEncoderDecoderModel.from_encoder_decoder_pretrained( ... "microsoft/swin-base-patch4-window7-224-in22k", "google-bert/bert-base-uncased" ... ) ``` ## Loading an existing `VisionEncoderDecoderModel` checkpoint and perform inference. To load fine-tuned checkpoints of the `VisionEncoderDecoderModel` class, [`VisionEncoderDecoderModel`] provides the `from_pretrained(...)` method just like any other model architecture in Transformers. To perform inference, one uses the [`generate`] method, which allows to autoregressively generate text. This method supports various forms of decoding, such as greedy, beam search and multinomial sampling. ```python >>> import requests >>> from PIL import Image >>> from transformers import GPT2TokenizerFast, ViTImageProcessor, VisionEncoderDecoderModel >>> # load a fine-tuned image captioning model and corresponding tokenizer and image processor >>> model = VisionEncoderDecoderModel.from_pretrained("nlpconnect/vit-gpt2-image-captioning") >>> tokenizer = GPT2TokenizerFast.from_pretrained("nlpconnect/vit-gpt2-image-captioning") >>> image_processor = ViTImageProcessor.from_pretrained("nlpconnect/vit-gpt2-image-captioning") >>> # let's perform inference on an image >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> pixel_values = image_processor(image, return_tensors="pt").pixel_values >>> # autoregressively generate caption (uses greedy decoding by default) >>> generated_ids = model.generate(pixel_values) >>> generated_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0] >>> print(generated_text) a cat laying on a blanket next to a cat laying on a bed ``` ## Loading a PyTorch checkpoint into `TFVisionEncoderDecoderModel`. [`TFVisionEncoderDecoderModel.from_pretrained`] currently doesn't support initializing the model from a PyTorch checkpoint. Passing `from_pt=True` to this method will throw an exception. If there are only PyTorch checkpoints for a particular vision encoder-decoder model, a workaround is: ```python >>> from transformers import VisionEncoderDecoderModel, TFVisionEncoderDecoderModel >>> _model = VisionEncoderDecoderModel.from_pretrained("nlpconnect/vit-gpt2-image-captioning") >>> _model.encoder.save_pretrained("./encoder") >>> _model.decoder.save_pretrained("./decoder") >>> model = TFVisionEncoderDecoderModel.from_encoder_decoder_pretrained( ... "./encoder", "./decoder", encoder_from_pt=True, decoder_from_pt=True ... ) >>> # This is only for copying some specific attributes of this particular model. >>> model.config = _model.config ``` ## Training Once the model is created, it can be fine-tuned similar to BART, T5 or any other encoder-decoder model on a dataset of (image, text) pairs. As you can see, only 2 inputs are required for the model in order to compute a loss: `pixel_values` (which are the images) and `labels` (which are the `input_ids` of the encoded target sequence). 
```python >>> from transformers import ViTImageProcessor, BertTokenizer, VisionEncoderDecoderModel >>> from datasets import load_dataset >>> image_processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k") >>> tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased") >>> model = VisionEncoderDecoderModel.from_encoder_decoder_pretrained( ... "google/vit-base-patch16-224-in21k", "google-bert/bert-base-uncased" ... ) >>> model.config.decoder_start_token_id = tokenizer.cls_token_id >>> model.config.pad_token_id = tokenizer.pad_token_id >>> dataset = load_dataset("huggingface/cats-image") >>> image = dataset["test"]["image"][0] >>> pixel_values = image_processor(image, return_tensors="pt").pixel_values >>> labels = tokenizer( ... "an image of two cats chilling on a couch", ... return_tensors="pt", ... ).input_ids >>> # the forward function automatically creates the correct decoder_input_ids >>> loss = model(pixel_values=pixel_values, labels=labels).loss ``` This model was contributed by [nielsr](https://github.com/nielsrogge). This model's TensorFlow and Flax versions were contributed by [ydshieh](https://github.com/ydshieh). ## VisionEncoderDecoderConfig [[autodoc]] VisionEncoderDecoderConfig <frameworkcontent> <pt> ## VisionEncoderDecoderModel [[autodoc]] VisionEncoderDecoderModel - forward - from_encoder_decoder_pretrained </pt> <tf> ## TFVisionEncoderDecoderModel [[autodoc]] TFVisionEncoderDecoderModel - call - from_encoder_decoder_pretrained </tf> <jax> ## FlaxVisionEncoderDecoderModel [[autodoc]] FlaxVisionEncoderDecoderModel - __call__ - from_encoder_decoder_pretrained </jax> </frameworkcontent>
transformers/docs/source/en/model_doc/vision-encoder-decoder.md/0
{ "file_path": "transformers/docs/source/en/model_doc/vision-encoder-decoder.md", "repo_id": "transformers", "token_count": 2537 }
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # WavLM ## Overview The WavLM model was proposed in [WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech Processing](https://arxiv.org/abs/2110.13900) by Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, Jian Wu, Long Zhou, Shuo Ren, Yanmin Qian, Yao Qian, Jian Wu, Michael Zeng, Furu Wei. The abstract from the paper is the following: *Self-supervised learning (SSL) achieves great success in speech recognition, while limited exploration has been attempted for other speech processing tasks. As speech signal contains multi-faceted information including speaker identity, paralinguistics, spoken content, etc., learning universal representations for all speech tasks is challenging. In this paper, we propose a new pre-trained model, WavLM, to solve full-stack downstream speech tasks. WavLM is built based on the HuBERT framework, with an emphasis on both spoken content modeling and speaker identity preservation. We first equip the Transformer structure with gated relative position bias to improve its capability on recognition tasks. For better speaker discrimination, we propose an utterance mixing training strategy, where additional overlapped utterances are created unsupervisedly and incorporated during model training. Lastly, we scale up the training dataset from 60k hours to 94k hours. WavLM Large achieves state-of-the-art performance on the SUPERB benchmark, and brings significant improvements for various speech processing tasks on their representative benchmarks.* Relevant checkpoints can be found under https://huggingface.co/models?other=wavlm. This model was contributed by [patrickvonplaten](https://huggingface.co/patrickvonplaten). The Authors' code can be found [here](https://github.com/microsoft/unilm/tree/master/wavlm). ## Usage tips - WavLM is a speech model that accepts a float array corresponding to the raw waveform of the speech signal. Please use [`Wav2Vec2Processor`] for the feature extraction. - WavLM model can be fine-tuned using connectionist temporal classification (CTC) so the model output has to be decoded using [`Wav2Vec2CTCTokenizer`]. - WavLM performs especially well on speaker verification, speaker identification, and speaker diarization tasks. ## Resources - [Audio classification task guide](../tasks/audio_classification) - [Automatic speech recognition task guide](../tasks/asr) ## WavLMConfig [[autodoc]] WavLMConfig ## WavLMModel [[autodoc]] WavLMModel - forward ## WavLMForCTC [[autodoc]] WavLMForCTC - forward ## WavLMForSequenceClassification [[autodoc]] WavLMForSequenceClassification - forward ## WavLMForAudioFrameClassification [[autodoc]] WavLMForAudioFrameClassification - forward ## WavLMForXVector [[autodoc]] WavLMForXVector - forward
transformers/docs/source/en/model_doc/wavlm.md/0
{ "file_path": "transformers/docs/source/en/model_doc/wavlm.md", "repo_id": "transformers", "token_count": 972 }
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Zamba2

Zamba2 is a large language model (LLM) trained by Zyphra, and made available under an Apache 2.0 license. Please see the [Zyphra Hugging Face](https://huggingface.co/collections/zyphra/) repository for model weights.

This model was contributed by [pglo](https://huggingface.co/pglo).


## Model details

Zamba2-1.2B, Zamba2-2.7B and Zamba2-7B are hybrid models combining state-space models (specifically [Mamba](https://github.com/state-spaces/mamba)) and transformer layers, and were trained using next-token prediction. Zamba2 uses shared transformer layers after every 6 mamba blocks. It uses the [Mistral v0.1 tokenizer](https://huggingface.co/mistralai/Mistral-7B-v0.1). We came to this architecture after a series of ablations at small scales. Zamba2-1.2B, Zamba2-2.7B and Zamba2-7B were pre-trained on 2T and 3T tokens, respectively.

<img src=https://github.com/user-attachments/assets/c2cff209-b901-483c-87aa-774b82a0769f width=30% height=40% />

## Quick start


### Prerequisites

Zamba2 requires you to use `transformers` version 4.48.0 or higher:
```bash
pip install "transformers>=4.48.0"
```

## Inference

```python
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-7B")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-7B", device_map="cuda", torch_dtype=torch.bfloat16)

input_text = "What factors contributed to the fall of the Roman Empire?"
input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")

outputs = model.generate(**input_ids, max_new_tokens=100)
print(tokenizer.decode(outputs[0]))
```

## Model card

The model cards can be found at:
* [Zamba2-1.2B](https://huggingface.co/Zyphra/Zamba2-1.2B)
* [Zamba2-2.7B](https://huggingface.co/Zyphra/Zamba2-2.7B)
* [Zamba2-7B](https://huggingface.co/Zyphra/Zamba2-7B)


## Issues
For issues with model output, or community discussion, please use the Hugging Face community [forum](https://huggingface.co/Zyphra/Zamba2-7B/discussions).


## License

The model weights are open-sourced via an Apache 2.0 license.


## Zamba2Config

[[autodoc]] Zamba2Config


## Zamba2Model

[[autodoc]] Zamba2Model
    - forward


## Zamba2ForCausalLM

[[autodoc]] Zamba2ForCausalLM
    - forward


## Zamba2ForSequenceClassification

[[autodoc]] transformers.Zamba2ForSequenceClassification
    - forward
transformers/docs/source/en/model_doc/zamba2.md/0
{ "file_path": "transformers/docs/source/en/model_doc/zamba2.md", "repo_id": "transformers", "token_count": 1035 }
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Efficient Training on Multiple CPUs When training on a single CPU is too slow, we can use multiple CPUs. This guide focuses on PyTorch-based DDP enabling distributed CPU training efficiently on [bare metal](#usage-in-trainer) and [Kubernetes](#usage-with-kubernetes). ## Intel® oneCCL Bindings for PyTorch [Intel® oneCCL](https://github.com/oneapi-src/oneCCL) (collective communications library) is a library for efficient distributed deep learning training implementing such collectives like allreduce, allgather, alltoall. For more information on oneCCL, please refer to the [oneCCL documentation](https://spec.oneapi.com/versions/latest/elements/oneCCL/source/index.html) and [oneCCL specification](https://spec.oneapi.com/versions/latest/elements/oneCCL/source/index.html). Module `oneccl_bindings_for_pytorch` (`torch_ccl` before version 1.12) implements PyTorch C10D ProcessGroup API and can be dynamically loaded as external ProcessGroup and only works on Linux platform now Check more detailed information for [oneccl_bind_pt](https://github.com/intel/torch-ccl). ### Intel® oneCCL Bindings for PyTorch installation Wheel files are available for the following Python versions: | Extension Version | Python 3.7 | Python 3.8 | Python 3.9 | Python 3.10 | Python 3.11 | | :---------------: | :--------: | :--------: | :--------: | :---------: | :---------: | | 2.5.0 | | √ | √ | √ | √ | | 2.4.0 | | √ | √ | √ | √ | | 2.3.0 | | √ | √ | √ | √ | | 2.2.0 | | √ | √ | √ | √ | Please run `pip list | grep torch` to get your `pytorch_version`. ```bash pip install oneccl_bind_pt=={pytorch_version} -f https://developer.intel.com/ipex-whl-stable-cpu ``` where `{pytorch_version}` should be your PyTorch version, for instance 2.4.0. Check more approaches for [oneccl_bind_pt installation](https://github.com/intel/torch-ccl). Versions of oneCCL and PyTorch must match. ## Intel® MPI library Use this standards-based MPI implementation to deliver flexible, efficient, scalable cluster messaging on Intel® architecture. This component is part of the Intel® oneAPI HPC Toolkit. oneccl_bindings_for_pytorch is installed along with the MPI tool set. Need to source the environment before using it. ```bash oneccl_bindings_for_pytorch_path=$(python -c "from oneccl_bindings_for_pytorch import cwd; print(cwd)") source $oneccl_bindings_for_pytorch_path/env/setvars.sh ``` #### Intel® Extension for PyTorch installation Intel Extension for PyTorch (IPEX) provides performance optimizations for CPU training with both Float32 and BFloat16 (refer to the [single CPU section](./perf_train_cpu) to learn more). The following "Usage in Trainer" takes mpirun in Intel® MPI library as an example. ## Usage in Trainer To enable multi CPU distributed training in the Trainer with the ccl backend, users should add **`--ddp_backend ccl`** in the command arguments. 
Let's see an example with the [question-answering example](https://github.com/huggingface/transformers/tree/main/examples/pytorch/question-answering) The following command enables training with 2 processes on one Xeon node, with one process running per one socket. The variables OMP_NUM_THREADS/CCL_WORKER_COUNT can be tuned for optimal performance. ```shell script export CCL_WORKER_COUNT=1 export MASTER_ADDR=127.0.0.1 mpirun -n 2 -genv OMP_NUM_THREADS=23 \ python3 examples/pytorch/question-answering/run_qa.py \ --model_name_or_path google-bert/bert-large-uncased \ --dataset_name squad \ --do_train \ --do_eval \ --per_device_train_batch_size 12 \ --learning_rate 3e-5 \ --num_train_epochs 2 \ --max_seq_length 384 \ --doc_stride 128 \ --output_dir /tmp/debug_squad/ \ --no_cuda \ --ddp_backend ccl \ --use_ipex ``` The following command enables training with a total of four processes on two Xeons (node0 and node1, taking node0 as the main process), ppn (processes per node) is set to 2, with one process running per one socket. The variables OMP_NUM_THREADS/CCL_WORKER_COUNT can be tuned for optimal performance. In node0, you need to create a configuration file which contains the IP addresses of each node (for example hostfile) and pass that configuration file path as an argument. ```shell script cat hostfile xxx.xxx.xxx.xxx #node0 ip xxx.xxx.xxx.xxx #node1 ip ``` Now, run the following command in node0 and **4DDP** will be enabled in node0 and node1 with BF16 auto mixed precision: ```shell script export CCL_WORKER_COUNT=1 export MASTER_ADDR=xxx.xxx.xxx.xxx #node0 ip mpirun -f hostfile -n 4 -ppn 2 \ -genv OMP_NUM_THREADS=23 \ python3 examples/pytorch/question-answering/run_qa.py \ --model_name_or_path google-bert/bert-large-uncased \ --dataset_name squad \ --do_train \ --do_eval \ --per_device_train_batch_size 12 \ --learning_rate 3e-5 \ --num_train_epochs 2 \ --max_seq_length 384 \ --doc_stride 128 \ --output_dir /tmp/debug_squad/ \ --no_cuda \ --ddp_backend ccl \ --use_ipex \ --bf16 ``` ## Usage with Kubernetes The same distributed training job from the previous section can be deployed to a Kubernetes cluster using the [Kubeflow PyTorchJob training operator](https://www.kubeflow.org/docs/components/training/user-guides/pytorch). ### Setup This example assumes that you have: * Access to a Kubernetes cluster with [Kubeflow installed](https://www.kubeflow.org/docs/started/installing-kubeflow) * [`kubectl`](https://kubernetes.io/docs/tasks/tools) installed and configured to access the Kubernetes cluster * A [Persistent Volume Claim (PVC)](https://kubernetes.io/docs/concepts/storage/persistent-volumes) that can be used to store datasets and model files. There are multiple options for setting up the PVC including using an NFS [storage class](https://kubernetes.io/docs/concepts/storage/storage-classes) or a cloud storage bucket. * A Docker container that includes your model training script and all the dependencies needed to run the script. For distributed CPU training jobs, this typically includes PyTorch, Transformers, Intel Extension for PyTorch, Intel oneCCL Bindings for PyTorch, and OpenSSH to communicate between the containers. 
The snippet below is an example of a Dockerfile that uses a base image that supports distributed CPU training and then extracts a Transformers release to the `/workspace` directory, so that the example scripts are included in the image: ```dockerfile FROM intel/intel-optimized-pytorch:2.4.0-pip-multinode RUN apt-get update -y && \ apt-get install -y --no-install-recommends --fix-missing \ google-perftools \ libomp-dev WORKDIR /workspace # Download and extract the transformers code ARG HF_TRANSFORMERS_VER="4.46.0" RUN pip install --no-cache-dir \ transformers==${HF_TRANSFORMERS_VER} && \ mkdir transformers && \ curl -sSL --retry 5 https://github.com/huggingface/transformers/archive/refs/tags/v${HF_TRANSFORMERS_VER}.tar.gz | tar -C transformers --strip-components=1 -xzf - ``` The image needs to be built and copied to the cluster's nodes or pushed to a container registry prior to deploying the PyTorchJob to the cluster. ### PyTorchJob Specification File The [Kubeflow PyTorchJob](https://www.kubeflow.org/docs/components/training/user-guides/pytorch) is used to run the distributed training job on the cluster. The yaml file for the PyTorchJob defines parameters such as: * The name of the PyTorchJob * The number of replicas (workers) * The python script and it's parameters that will be used to run the training job * The types of resources (node selector, memory, and CPU) needed for each worker * The image/tag for the Docker container to use * Environment variables * A volume mount for the PVC The volume mount defines a path where the PVC will be mounted in the container for each worker pod. This location can be used for the dataset, checkpoint files, and the saved model after training completes. The snippet below is an example of a yaml file for a PyTorchJob with 4 workers running the [question-answering example](https://github.com/huggingface/transformers/tree/main/examples/pytorch/question-answering). 
```yaml apiVersion: "kubeflow.org/v1" kind: PyTorchJob metadata: name: transformers-pytorchjob spec: elasticPolicy: rdzvBackend: c10d minReplicas: 1 maxReplicas: 4 maxRestarts: 10 pytorchReplicaSpecs: Worker: replicas: 4 # The number of worker pods restartPolicy: OnFailure template: spec: containers: - name: pytorch image: <image name>:<tag> # Specify the docker image to use for the worker pods imagePullPolicy: IfNotPresent command: ["/bin/bash", "-c"] args: - >- cd /workspace/transformers; pip install -r /workspace/transformers/examples/pytorch/question-answering/requirements.txt; source /usr/local/lib/python3.10/dist-packages/oneccl_bindings_for_pytorch/env/setvars.sh; torchrun /workspace/transformers/examples/pytorch/question-answering/run_qa.py \ --model_name_or_path distilbert/distilbert-base-uncased \ --dataset_name squad \ --do_train \ --do_eval \ --per_device_train_batch_size 12 \ --learning_rate 3e-5 \ --num_train_epochs 2 \ --max_seq_length 384 \ --doc_stride 128 \ --output_dir /tmp/pvc-mount/output_$(date +%Y%m%d_%H%M%S) \ --no_cuda \ --ddp_backend ccl \ --bf16 \ --use_ipex; env: - name: LD_PRELOAD value: "/usr/lib/x86_64-linux-gnu/libtcmalloc.so.4.5.9:/usr/local/lib/libiomp5.so" - name: TRANSFORMERS_CACHE value: "/tmp/pvc-mount/transformers_cache" - name: HF_DATASETS_CACHE value: "/tmp/pvc-mount/hf_datasets_cache" - name: LOGLEVEL value: "INFO" - name: CCL_WORKER_COUNT value: "1" - name: OMP_NUM_THREADS # Can be tuned for optimal performance value: "240" resources: limits: cpu: 240 # Update the CPU and memory limit values based on your nodes memory: 128Gi requests: cpu: 240 # Update the CPU and memory request values based on your nodes memory: 128Gi volumeMounts: - name: pvc-volume mountPath: /tmp/pvc-mount - mountPath: /dev/shm name: dshm restartPolicy: Never nodeSelector: # Optionally use nodeSelector to match a certain node label for the worker pods node-type: gnr volumes: - name: pvc-volume persistentVolumeClaim: claimName: transformers-pvc - name: dshm emptyDir: medium: Memory ``` To run this example, update the yaml based on your training script and the nodes in your cluster. <Tip> The CPU resource limits/requests in the yaml are defined in [cpu units](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu) where 1 CPU unit is equivalent to 1 physical CPU core or 1 virtual core (depending on whether the node is a physical host or a VM). The amount of CPU and memory limits/requests defined in the yaml should be less than the amount of available CPU/memory capacity on a single machine. It is usually a good idea to not use the entire machine's capacity in order to leave some resources for the kubelet and OS. In order to get ["guaranteed"](https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/#guaranteed) [quality of service](https://kubernetes.io/docs/tasks/configure-pod-container/quality-service-pod) for the worker pods, set the same CPU and memory amounts for both the resource limits and requests. </Tip> ### Deploy After the PyTorchJob spec has been updated with values appropriate for your cluster and training job, it can be deployed to the cluster using: ```bash export NAMESPACE=<specify your namespace> kubectl create -f pytorchjob.yaml -n ${NAMESPACE} ``` The `kubectl get pods -n ${NAMESPACE}` command can then be used to list the pods in your namespace. You should see the worker pods for the PyTorchJob that was just deployed. 
At first, they will probably have a status of "Pending" as the containers get pulled and created, then the status should change to "Running". ``` NAME READY STATUS RESTARTS AGE ... transformers-pytorchjob-worker-0 1/1 Running 0 7m37s transformers-pytorchjob-worker-1 1/1 Running 0 7m37s transformers-pytorchjob-worker-2 1/1 Running 0 7m37s transformers-pytorchjob-worker-3 1/1 Running 0 7m37s ... ``` The logs for worker can be viewed using `kubectl logs <pod name> -n ${NAMESPACE}`. Add `-f` to stream the logs, for example: ```bash kubectl logs transformers-pytorchjob-worker-0 -n ${NAMESPACE} -f ``` After the training job completes, the trained model can be copied from the PVC or storage location. When you are done with the job, the PyTorchJob resource can be deleted from the cluster using `kubectl delete -f pytorchjob.yaml -n ${NAMESPACE}`. ## Summary This guide covered running distributed PyTorch training jobs using multiple CPUs on bare metal and on a Kubernetes cluster. Both cases utilize Intel Extension for PyTorch and Intel oneCCL Bindings for PyTorch for optimal training performance, and can be used as a template to run your own workload on multiple nodes.
transformers/docs/source/en/perf_train_cpu_many.md/0
{ "file_path": "transformers/docs/source/en/perf_train_cpu_many.md", "repo_id": "transformers", "token_count": 5718 }
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Compressed Tensors The [`compressed-tensors`](https://github.com/neuralmagic/compressed-tensors) library provides a versatile and efficient way to store and manage compressed model checkpoints. This library supports various quantization and sparsity schemes, making it a unified format for handling different model optimizations like GPTQ, AWQ, SmoothQuant, INT8, FP8, SparseGPT, and more. Some of the supported formats include: 1. `dense` 2. `int-quantized` ([sample](https://huggingface.co/nm-testing/tinyllama-w8a8-compressed-hf-quantizer)): INT8 quantized models 3. `float-quantized` ([sample](https://huggingface.co/nm-testing/Meta-Llama-3-8B-Instruct-fp8-hf_compat)): FP8 quantized models; currently support E4M3 4. `pack-quantized` ([sample](https://huggingface.co/nm-testing/tinyllama-w4a16-compressed-hf-quantizer)): INT4 or INT8 weight-quantized models, packed into INT32. For INT4, the weights have an INT4 range but are stored as INT8 and then packed into INT32. Compressed models can be easily created using [llm-compressor](https://github.com/vllm-project/llm-compressor). Alternatively models can be created independently and serialized with a compressed tensors config. To find existing models on the Hugging Face Model Hub, search for the [`compressed-tensors` tag](https://huggingface.co/models?other=compressed-tensors). #### Features: - Weight and activation precisions: FP8, INT4, INT8 (for Q/DQ arbitrary precision is allowed for INT) - Quantization scales and zero-points strategies: [tensor, channel, group, block, token](https://github.com/neuralmagic/compressed-tensors/blob/83b2e7a969d70606421a76b9a3d112646077c8de/src/compressed_tensors/quantization/quant_args.py#L43-L52) - Dynamic per-token activation quantization (or any static strategy) - Sparsity in weights (unstructured or semi-structured like 2:4) can be composed with quantization for extreme compression - Supports quantization of arbitrary modules, not just Linear modules - Targeted support or ignoring of modules by name or class ## Installation It is recommended to install stable releases of compressed-tensors from [PyPI](https://pypi.org/project/compressed-tensors): ```bash pip install compressed-tensors ``` Developers who want to experiment with the latest features can also install the package from source: ```bash git clone https://github.com/neuralmagic/compressed-tensors cd compressed-tensors pip install -e . ``` ## Quickstart Model Load Quantized models can be easily loaded for inference as shown below. Only models that have already been quantized can be loaded at the moment. To quantize a model into the compressed-tensors format see [llm-compressor](https://github.com/vllm-project/llm-compressor). 
```python from transformers import AutoModelForCausalLM # Load the model in compressed-tensors format ct_model = AutoModelForCausalLM.from_pretrained("nm-testing/Meta-Llama-3.1-8B-Instruct-FP8-hf") # Measure memory usage mem_params = sum([param.nelement()*param.element_size() for param in ct_model.parameters()]) print(f"{mem_params/2**30:.4f} GB") # 8.4575 GB ``` We can see just above that the compressed-tensors FP8 checkpoint of Llama 3.1 8B is able to be loaded for inference using half of the memory of the unquantized reference checkpoint. ## Sample Use Cases - Load and run an FP8 model ```python from transformers import AutoModelForCausalLM, AutoTokenizer prompt = [ "Hello, my name is", "The capital of France is", "The future of AI is" ] model_name = "nm-testing/Meta-Llama-3-8B-Instruct-fp8-hf_compat" quantized_model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto") tokenizer = AutoTokenizer.from_pretrained(model_name) inputs = tokenizer(prompt, return_tensors="pt") generated_ids = quantized_model.generate(**inputs, max_length=50, do_sample=False) outputs = tokenizer.batch_decode(generated_ids) print(outputs) """ ['<|begin_of_text|>Hello, my name is [Name]. I am a [Your Profession/Student] and I am here to learn about the [Course/Program] at [University/Institution]. I am excited to be here and I am looking forward to', '<|begin_of_text|>The capital of France is Paris, which is located in the north-central part of the country. Paris is the most populous city in France and is known for its stunning architecture, art museums, fashion, and romantic atmosphere. The city is home to', "<|begin_of_text|>The future of AI is here, and it's already changing the way we live and work. From virtual assistants to self-driving cars, AI is transforming industries and revolutionizing the way we interact with technology. But what does the future of AI hold"] """ ``` The above shows a quick example for running generation using a `compressed-tensors` model. Currently, once loaded the model cannot be saved. ## Deep dive into a compressed-tensors model checkpoint In this example we will examine how the compressed-tensors model nm-testing/Meta-Llama-3.1-8B-Instruct-FP8-hf is defined through its configuration entry and see how this translates to the loaded model representation. First, let us look at the [`quantization_config` of the model](https://huggingface.co/nm-testing/Meta-Llama-3.1-8B-Instruct-FP8-hf/blob/main/config.json). At a glance it looks overwhelming with the number of entries but this is because compressed-tensors is a format that allows for flexible expression both during and after model compression. In practice for checkpoint loading and inference the configuration can be simplified to not include all the default or empty entries, so we will do that here to focus on what compression is actually represented. ```yaml "quantization_config": { "config_groups": { "group_0": { "input_activations": { "num_bits": 8, "strategy": "tensor", "type": "float" }, "targets": ["Linear"], "weights": { "num_bits": 8, "strategy": "tensor", "type": "float" } } }, "format": "naive-quantized", "ignore": ["lm_head"], "quant_method": "compressed-tensors", "quantization_status": "frozen" }, ``` We can see from the above configuration that it is specifying one config group that includes weight and activation quantization to FP8 with a static per-tensor strategy. 
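If you prefer to inspect this configuration programmatically rather than reading `config.json` by hand, a small sketch using `AutoConfig` (not part of the original guide) looks like the following; the keys shown mirror the simplified configuration above.

```python
from transformers import AutoConfig

config = AutoConfig.from_pretrained("nm-testing/Meta-Llama-3.1-8B-Instruct-FP8-hf")

# The quantization configuration from config.json is exposed on the config object.
# Depending on the transformers version it may be a plain dict or a config object,
# so convert defensively before indexing into it.
quant_config = config.quantization_config
if hasattr(quant_config, "to_dict"):
    quant_config = quant_config.to_dict()

print(quant_config["format"])         # e.g. "naive-quantized"
print(quant_config["ignore"])         # modules excluded from quantization, e.g. ["lm_head"]
print(quant_config["config_groups"])  # per-group weight/activation quantization arguments
```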
It is also worth noting that in the `ignore` list there is an entry to skip quantization of the `lm_head` module, so that module should be untouched in the checkpoint.

To see the result of the configuration in practice, we can simply use the [safetensors viewer](https://huggingface.co/nm-testing/Meta-Llama-3.1-8B-Instruct-FP8-hf?show_file_info=model.safetensors.index.json) on the model card to see the quantized weights, input_scale, and weight_scale for all of the Linear modules in the first model layer (and so on for the rest of the layers).

| Tensors | Shape | Precision |
| ------- | ----- | --------- |
| model.layers.0.input_layernorm.weight | [4 096] | BF16 |
| model.layers.0.mlp.down_proj.input_scale | [1] | BF16 |
| model.layers.0.mlp.down_proj.weight | [4 096, 14 336] | F8_E4M3 |
| model.layers.0.mlp.down_proj.weight_scale | [1] | BF16 |
| model.layers.0.mlp.gate_proj.input_scale | [1] | BF16 |
| model.layers.0.mlp.gate_proj.weight | [14 336, 4 096] | F8_E4M3 |
| model.layers.0.mlp.gate_proj.weight_scale | [1] | BF16 |
| model.layers.0.mlp.up_proj.input_scale | [1] | BF16 |
| model.layers.0.mlp.up_proj.weight | [14 336, 4 096] | F8_E4M3 |
| model.layers.0.mlp.up_proj.weight_scale | [1] | BF16 |
| model.layers.0.post_attention_layernorm.weight | [4 096] | BF16 |
| model.layers.0.self_attn.k_proj.input_scale | [1] | BF16 |
| model.layers.0.self_attn.k_proj.weight | [1 024, 4 096] | F8_E4M3 |
| model.layers.0.self_attn.k_proj.weight_scale | [1] | BF16 |
| model.layers.0.self_attn.o_proj.input_scale | [1] | BF16 |
| model.layers.0.self_attn.o_proj.weight | [4 096, 4 096] | F8_E4M3 |
| model.layers.0.self_attn.o_proj.weight_scale | [1] | BF16 |
| model.layers.0.self_attn.q_proj.input_scale | [1] | BF16 |
| model.layers.0.self_attn.q_proj.weight | [4 096, 4 096] | F8_E4M3 |
| model.layers.0.self_attn.q_proj.weight_scale | [1] | BF16 |
| model.layers.0.self_attn.v_proj.input_scale | [1] | BF16 |
| model.layers.0.self_attn.v_proj.weight | [1 024, 4 096] | F8_E4M3 |
| model.layers.0.self_attn.v_proj.weight_scale | [1] | BF16 |

When we load the model with the compressed-tensors HFQuantizer integration, we can see that all of the Linear modules that are specified within the quantization configuration have been replaced by `CompressedLinear` modules that manage the compressed weights and forward pass for inference. Note that the `lm_head` mentioned before in the ignore list is still kept as an unquantized Linear module.
```python from transformers import AutoModelForCausalLM ct_model = AutoModelForCausalLM.from_pretrained("nm-testing/Meta-Llama-3.1-8B-Instruct-FP8-hf") print(ct_model) """ LlamaForCausalLM( (model): LlamaModel( (embed_tokens): Embedding(128256, 4096) (layers): ModuleList( (0-31): 32 x LlamaDecoderLayer( (self_attn): LlamaSdpaAttention( (q_proj): CompressedLinear( in_features=4096, out_features=4096, bias=False (input_observer): MovingAverageMinMaxObserver() (weight_observer): MovingAverageMinMaxObserver() ) (k_proj): CompressedLinear( in_features=4096, out_features=1024, bias=False (input_observer): MovingAverageMinMaxObserver() (weight_observer): MovingAverageMinMaxObserver() ) (v_proj): CompressedLinear( in_features=4096, out_features=1024, bias=False (input_observer): MovingAverageMinMaxObserver() (weight_observer): MovingAverageMinMaxObserver() ) (o_proj): CompressedLinear( in_features=4096, out_features=4096, bias=False (input_observer): MovingAverageMinMaxObserver() (weight_observer): MovingAverageMinMaxObserver() ) (rotary_emb): LlamaRotaryEmbedding() ) (mlp): LlamaMLP( (gate_proj): CompressedLinear( in_features=4096, out_features=14336, bias=False (input_observer): MovingAverageMinMaxObserver() (weight_observer): MovingAverageMinMaxObserver() ) (up_proj): CompressedLinear( in_features=4096, out_features=14336, bias=False (input_observer): MovingAverageMinMaxObserver() (weight_observer): MovingAverageMinMaxObserver() ) (down_proj): CompressedLinear( in_features=14336, out_features=4096, bias=False (input_observer): MovingAverageMinMaxObserver() (weight_observer): MovingAverageMinMaxObserver() ) (act_fn): SiLU() ) (input_layernorm): LlamaRMSNorm((4096,), eps=1e-05) (post_attention_layernorm): LlamaRMSNorm((4096,), eps=1e-05) ) ) (norm): LlamaRMSNorm((4096,), eps=1e-05) (rotary_emb): LlamaRotaryEmbedding() ) (lm_head): Linear(in_features=4096, out_features=128256, bias=False) ) """ ```
transformers/docs/source/en/quantization/compressed_tensors.md/0
{ "file_path": "transformers/docs/source/en/quantization/compressed_tensors.md", "repo_id": "transformers", "token_count": 4297 }
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # What 🤗 Transformers can do 🤗 Transformers is a library of pretrained state-of-the-art models for natural language processing (NLP), computer vision, and audio and speech processing tasks. Not only does the library contain Transformer models, but it also has non-Transformer models like modern convolutional networks for computer vision tasks. If you look at some of the most popular consumer products today, like smartphones, apps, and televisions, odds are that some kind of deep learning technology is behind it. Want to remove a background object from a picture taken by your smartphone? This is an example of a panoptic segmentation task (don't worry if you don't know what this means yet, we'll describe it in the following sections!). This page provides an overview of the different speech and audio, computer vision, and NLP tasks that can be solved with the 🤗 Transformers library in just three lines of code! ## Audio Audio and speech processing tasks are a little different from the other modalities mainly because audio as an input is a continuous signal. Unlike text, a raw audio waveform can't be neatly split into discrete chunks the way a sentence can be divided into words. To get around this, the raw audio signal is typically sampled at regular intervals. If you take more samples within an interval, the sampling rate is higher, and the audio more closely resembles the original audio source. Previous approaches preprocessed the audio to extract useful features from it. It is now more common to start audio and speech processing tasks by directly feeding the raw audio waveform to a feature encoder to extract an audio representation. This simplifies the preprocessing step and allows the model to learn the most essential features. ### Audio classification Audio classification is a task that labels audio data from a predefined set of classes. 
It is a broad category with many specific applications, some of which include: * acoustic scene classification: label audio with a scene label ("office", "beach", "stadium") * acoustic event detection: label audio with a sound event label ("car horn", "whale calling", "glass breaking") * tagging: label audio containing multiple sounds (birdsongs, speaker identification in a meeting) * music classification: label music with a genre label ("metal", "hip-hop", "country") ```py >>> from transformers import pipeline >>> classifier = pipeline(task="audio-classification", model="superb/hubert-base-superb-er") >>> preds = classifier("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac") >>> preds = [{"score": round(pred["score"], 4), "label": pred["label"]} for pred in preds] >>> preds [{'score': 0.4532, 'label': 'hap'}, {'score': 0.3622, 'label': 'sad'}, {'score': 0.0943, 'label': 'neu'}, {'score': 0.0903, 'label': 'ang'}] ``` ### Automatic speech recognition Automatic speech recognition (ASR) transcribes speech into text. It is one of the most common audio tasks due partly to speech being such a natural form of human communication. Today, ASR systems are embedded in "smart" technology products like speakers, phones, and cars. We can ask our virtual assistants to play music, set reminders, and tell us the weather. But one of the key challenges Transformer architectures have helped with is in low-resource languages. By pretraining on large amounts of speech data, finetuning the model on only one hour of labeled speech data in a low-resource language can still produce high-quality results compared to previous ASR systems trained on 100x more labeled data. ```py >>> from transformers import pipeline >>> transcriber = pipeline(task="automatic-speech-recognition", model="openai/whisper-small") >>> transcriber("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac") {'text': ' I have a dream that one day this nation will rise up and live out the true meaning of its creed.'} ``` ## Computer vision One of the first and earliest successful computer vision tasks was recognizing images of zip code numbers using a [convolutional neural network (CNN)](glossary#convolution). An image is composed of pixels, and each pixel has a numerical value. This makes it easy to represent an image as a matrix of pixel values. Each particular combination of pixel values describes the colors of an image. Two general ways computer vision tasks can be solved are: 1. Use convolutions to learn the hierarchical features of an image from low-level features to high-level abstract things. 2. Split an image into patches and use a Transformer to gradually learn how each image patch is related to each other to form an image. Unlike the bottom-up approach favored by a CNN, this is kind of like starting out with a blurry image and then gradually bringing it into focus. ### Image classification Image classification labels an entire image from a predefined set of classes. 
Like most classification tasks, there are many practical use cases for image classification, some of which include: * healthcare: label medical images to detect disease or monitor patient health * environment: label satellite images to monitor deforestation, inform wildland management or detect wildfires * agriculture: label images of crops to monitor plant health or satellite images for land use monitoring * ecology: label images of animal or plant species to monitor wildlife populations or track endangered species ```py >>> from transformers import pipeline >>> classifier = pipeline(task="image-classification") >>> preds = classifier( ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg" ... ) >>> preds = [{"score": round(pred["score"], 4), "label": pred["label"]} for pred in preds] >>> print(*preds, sep="\n") {'score': 0.4335, 'label': 'lynx, catamount'} {'score': 0.0348, 'label': 'cougar, puma, catamount, mountain lion, painter, panther, Felis concolor'} {'score': 0.0324, 'label': 'snow leopard, ounce, Panthera uncia'} {'score': 0.0239, 'label': 'Egyptian cat'} {'score': 0.0229, 'label': 'tiger cat'} ``` ### Object detection Unlike image classification, object detection identifies multiple objects within an image and the objects' positions in an image (defined by the bounding box). Some example applications of object detection include: * self-driving vehicles: detect everyday traffic objects such as other vehicles, pedestrians, and traffic lights * remote sensing: disaster monitoring, urban planning, and weather forecasting * defect detection: detect cracks or structural damage in buildings, and manufacturing defects ```py >>> from transformers import pipeline >>> detector = pipeline(task="object-detection") >>> preds = detector( ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg" ... ) >>> preds = [{"score": round(pred["score"], 4), "label": pred["label"], "box": pred["box"]} for pred in preds] >>> preds [{'score': 0.9865, 'label': 'cat', 'box': {'xmin': 178, 'ymin': 154, 'xmax': 882, 'ymax': 598}}] ``` ### Image segmentation Image segmentation is a pixel-level task that assigns every pixel in an image to a class. It differs from object detection, which uses bounding boxes to label and predict objects in an image because segmentation is more granular. Segmentation can detect objects at a pixel-level. There are several types of image segmentation: * instance segmentation: in addition to labeling the class of an object, it also labels each distinct instance of an object ("dog-1", "dog-2") * panoptic segmentation: a combination of semantic and instance segmentation; it labels each pixel with a semantic class **and** each distinct instance of an object Segmentation tasks are helpful in self-driving vehicles to create a pixel-level map of the world around them so they can navigate safely around pedestrians and other vehicles. It is also useful for medical imaging, where the task's finer granularity can help identify abnormal cells or organ features. Image segmentation can also be used in ecommerce to virtually try on clothes or create augmented reality experiences by overlaying objects in the real world through your camera. ```py >>> from transformers import pipeline >>> segmenter = pipeline(task="image-segmentation") >>> preds = segmenter( ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg" ... 
) >>> preds = [{"score": round(pred["score"], 4), "label": pred["label"]} for pred in preds] >>> print(*preds, sep="\n") {'score': 0.9879, 'label': 'LABEL_184'} {'score': 0.9973, 'label': 'snow'} {'score': 0.9972, 'label': 'cat'} ``` ### Depth estimation Depth estimation predicts the distance of each pixel in an image from the camera. This computer vision task is especially important for scene understanding and reconstruction. For example, in self-driving cars, vehicles need to understand how far objects like pedestrians, traffic signs, and other vehicles are to avoid obstacles and collisions. Depth information is also helpful for constructing 3D representations from 2D images and can be used to create high-quality 3D representations of biological structures or buildings. There are two approaches to depth estimation: * stereo: depths are estimated by comparing two images of the same image from slightly different angles * monocular: depths are estimated from a single image ```py >>> from transformers import pipeline >>> depth_estimator = pipeline(task="depth-estimation") >>> preds = depth_estimator( ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg" ... ) ``` ## Natural language processing NLP tasks are among the most common types of tasks because text is such a natural way for us to communicate. To get text into a format recognized by a model, it needs to be tokenized. This means dividing a sequence of text into separate words or subwords (tokens) and then converting these tokens into numbers. As a result, you can represent a sequence of text as a sequence of numbers, and once you have a sequence of numbers, it can be input into a model to solve all sorts of NLP tasks! ### Text classification Like classification tasks in any modality, text classification labels a sequence of text (it can be sentence-level, a paragraph, or a document) from a predefined set of classes. There are many practical applications for text classification, some of which include: * sentiment analysis: label text according to some polarity like `positive` or `negative` which can inform and support decision-making in fields like politics, finance, and marketing * content classification: label text according to some topic to help organize and filter information in news and social media feeds (`weather`, `sports`, `finance`, etc.) ```py >>> from transformers import pipeline >>> classifier = pipeline(task="sentiment-analysis") >>> preds = classifier("Hugging Face is the best thing since sliced bread!") >>> preds = [{"score": round(pred["score"], 4), "label": pred["label"]} for pred in preds] >>> preds [{'score': 0.9991, 'label': 'POSITIVE'}] ``` ### Token classification In any NLP task, text is preprocessed by separating the sequence of text into individual words or subwords. These are known as [tokens](glossary#token). Token classification assigns each token a label from a predefined set of classes. Two common types of token classification are: * named entity recognition (NER): label a token according to an entity category like organization, person, location or date. NER is especially popular in biomedical settings, where it can label genes, proteins, and drug names. * part-of-speech tagging (POS): label a token according to its part-of-speech like noun, verb, or adjective. POS is useful for helping translation systems understand how two identical words are grammatically different (bank as a noun versus bank as a verb). 
```py >>> from transformers import pipeline >>> classifier = pipeline(task="ner") >>> preds = classifier("Hugging Face is a French company based in New York City.") >>> preds = [ ... { ... "entity": pred["entity"], ... "score": round(pred["score"], 4), ... "index": pred["index"], ... "word": pred["word"], ... "start": pred["start"], ... "end": pred["end"], ... } ... for pred in preds ... ] >>> print(*preds, sep="\n") {'entity': 'I-ORG', 'score': 0.9968, 'index': 1, 'word': 'Hu', 'start': 0, 'end': 2} {'entity': 'I-ORG', 'score': 0.9293, 'index': 2, 'word': '##gging', 'start': 2, 'end': 7} {'entity': 'I-ORG', 'score': 0.9763, 'index': 3, 'word': 'Face', 'start': 8, 'end': 12} {'entity': 'I-MISC', 'score': 0.9983, 'index': 6, 'word': 'French', 'start': 18, 'end': 24} {'entity': 'I-LOC', 'score': 0.999, 'index': 10, 'word': 'New', 'start': 42, 'end': 45} {'entity': 'I-LOC', 'score': 0.9987, 'index': 11, 'word': 'York', 'start': 46, 'end': 50} {'entity': 'I-LOC', 'score': 0.9992, 'index': 12, 'word': 'City', 'start': 51, 'end': 55} ``` ### Question answering Question answering is another token-level task that returns an answer to a question, sometimes with context (open-domain) and other times without context (closed-domain). This task happens whenever we ask a virtual assistant something like whether a restaurant is open. It can also provide customer or technical support and help search engines retrieve the relevant information you're asking for. There are two common types of question answering: * extractive: given a question and some context, the answer is a span of text from the context the model must extract * abstractive: given a question and some context, the answer is generated from the context; this approach is handled by the [`Text2TextGenerationPipeline`] instead of the [`QuestionAnsweringPipeline`] shown below ```py >>> from transformers import pipeline >>> question_answerer = pipeline(task="question-answering") >>> preds = question_answerer( ... question="What is the name of the repository?", ... context="The name of the repository is huggingface/transformers", ... ) >>> print( ... f"score: {round(preds['score'], 4)}, start: {preds['start']}, end: {preds['end']}, answer: {preds['answer']}" ... ) score: 0.9327, start: 30, end: 54, answer: huggingface/transformers ``` ### Summarization Summarization creates a shorter version of a text from a longer one while trying to preserve most of the meaning of the original document. Summarization is a sequence-to-sequence task; it outputs a shorter text sequence than the input. There are a lot of long-form documents that can be summarized to help readers quickly understand the main points. Legislative bills, legal and financial documents, patents, and scientific papers are a few examples of documents that could be summarized to save readers time and serve as a reading aid. Like question answering, there are two types of summarization: * extractive: identify and extract the most important sentences from the original text * abstractive: generate the target summary (which may include new words not in the input document) from the original text; the [`SummarizationPipeline`] uses the abstractive approach ```py >>> from transformers import pipeline >>> summarizer = pipeline(task="summarization") >>> summarizer( ... "In this work, we presented the Transformer, the first sequence transduction model based entirely on attention, replacing the recurrent layers most commonly used in encoder-decoder architectures with multi-headed self-attention. 
For translation tasks, the Transformer can be trained significantly faster than architectures based on recurrent or convolutional layers. On both WMT 2014 English-to-German and WMT 2014 English-to-French translation tasks, we achieve a new state of the art. In the former task our best model outperforms even all previously reported ensembles." ... ) [{'summary_text': ' The Transformer is the first sequence transduction model based entirely on attention . It replaces the recurrent layers most commonly used in encoder-decoder architectures with multi-headed self-attention . For translation tasks, the Transformer can be trained significantly faster than architectures based on recurrent or convolutional layers .'}] ``` ### Translation Translation converts a sequence of text in one language to another. It is important in helping people from different backgrounds communicate with each other, help translate content to reach wider audiences, and even be a learning tool to help people learn a new language. Along with summarization, translation is a sequence-to-sequence task, meaning the model receives an input sequence and returns a target output sequence. In the early days, translation models were mostly monolingual, but recently, there has been increasing interest in multilingual models that can translate between many pairs of languages. ```py >>> from transformers import pipeline >>> text = "translate English to French: Hugging Face is a community-based open-source platform for machine learning." >>> translator = pipeline(task="translation", model="google-t5/t5-small") >>> translator(text) [{'translation_text': "Hugging Face est une tribune communautaire de l'apprentissage des machines."}] ``` ### Language modeling Language modeling is a task that predicts a word in a sequence of text. It has become a very popular NLP task because a pretrained language model can be finetuned for many other downstream tasks. Lately, there has been a lot of interest in large language models (LLMs) which demonstrate zero- or few-shot learning. This means the model can solve tasks it wasn't explicitly trained to do! Language models can be used to generate fluent and convincing text, though you need to be careful since the text may not always be accurate. There are two types of language modeling: * causal: the model's objective is to predict the next token in a sequence, and future tokens are masked ```py >>> from transformers import pipeline >>> prompt = "Hugging Face is a community-based open-source platform for machine learning." >>> generator = pipeline(task="text-generation") >>> generator(prompt) # doctest: +SKIP ``` * masked: the model's objective is to predict a masked token in a sequence with full access to the tokens in the sequence ```py >>> text = "Hugging Face is a community-based open-source <mask> for machine learning." >>> fill_mask = pipeline(task="fill-mask") >>> preds = fill_mask(text, top_k=1) >>> preds = [ ... { ... "score": round(pred["score"], 4), ... "token": pred["token"], ... "token_str": pred["token_str"], ... "sequence": pred["sequence"], ... } ... for pred in preds ... ] >>> preds [{'score': 0.224, 'token': 3944, 'token_str': ' tool', 'sequence': 'Hugging Face is a community-based open-source tool for machine learning.'}] ``` ## Multimodal Multimodal tasks require a model to process multiple data modalities (text, image, audio, video) to solve a particular problem. 
Image captioning is an example of a multimodal task where the model takes an image as input and outputs a sequence of text describing the image or some properties of the image. Although multimodal models work with different data types or modalities, internally, the preprocessing steps help the model convert all the data types into embeddings (vectors or list of numbers that holds meaningful information about the data). For a task like image captioning, the model learns relationships between image embeddings and text embeddings. ### Document question answering Document question answering is a task that answers natural language questions from a document. Unlike a token-level question answering task which takes text as input, document question answering takes an image of a document as input along with a question about the document and returns an answer. Document question answering can be used to parse structured documents and extract key information from it. In the example below, the total amount and change due can be extracted from a receipt. ```py >>> from transformers import pipeline >>> from PIL import Image >>> import requests >>> url = "https://huggingface.co/datasets/hf-internal-testing/example-documents/resolve/main/jpeg_images/2.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> doc_question_answerer = pipeline("document-question-answering", model="magorshunov/layoutlm-invoices") >>> preds = doc_question_answerer( ... question="What is the total amount?", ... image=image, ... ) >>> preds [{'score': 0.8531, 'answer': '17,000', 'start': 4, 'end': 4}] ``` Hopefully, this page has given you some more background information about all the types of tasks in each modality and the practical importance of each one. In the next [section](tasks_explained), you'll learn **how** 🤗 Transformers work to solve these tasks.
transformers/docs/source/en/task_summary.md/0
{ "file_path": "transformers/docs/source/en/task_summary.md", "repo_id": "transformers", "token_count": 5654 }
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Multiple choice [[open-in-colab]] A multiple choice task is similar to question answering, except several candidate answers are provided along with a context and the model is trained to select the correct answer. This guide will show you how to: 1. Finetune [BERT](https://huggingface.co/google-bert/bert-base-uncased) on the `regular` configuration of the [SWAG](https://huggingface.co/datasets/swag) dataset to select the best answer given multiple options and some context. 2. Use your finetuned model for inference. Before you begin, make sure you have all the necessary libraries installed: ```bash pip install transformers datasets evaluate ``` We encourage you to login to your Hugging Face account so you can upload and share your model with the community. When prompted, enter your token to login: ```py >>> from huggingface_hub import notebook_login >>> notebook_login() ``` ## Load SWAG dataset Start by loading the `regular` configuration of the SWAG dataset from the 🤗 Datasets library: ```py >>> from datasets import load_dataset >>> swag = load_dataset("swag", "regular") ``` Then take a look at an example: ```py >>> swag["train"][0] {'ending0': 'passes by walking down the street playing their instruments.', 'ending1': 'has heard approaching them.', 'ending2': "arrives and they're outside dancing and asleep.", 'ending3': 'turns the lead singer watches the performance.', 'fold-ind': '3416', 'gold-source': 'gold', 'label': 0, 'sent1': 'Members of the procession walk down the street holding small horn brass instruments.', 'sent2': 'A drum line', 'startphrase': 'Members of the procession walk down the street holding small horn brass instruments. A drum line', 'video-id': 'anetv_jkn6uvmqwh4'} ``` While it looks like there are a lot of fields here, it is actually pretty straightforward: - `sent1` and `sent2`: these fields show how a sentence starts, and if you put the two together, you get the `startphrase` field. - `ending`: suggests a possible ending for how a sentence can end, but only one of them is correct. - `label`: identifies the correct sentence ending. ## Preprocess The next step is to load a BERT tokenizer to process the sentence starts and the four possible endings: ```py >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased") ``` The preprocessing function you want to create needs to: 1. Make four copies of the `sent1` field and combine each of them with `sent2` to recreate how a sentence starts. 2. Combine `sent2` with each of the four possible sentence endings. 3. Flatten these two lists so you can tokenize them, and then unflatten them afterward so each example has a corresponding `input_ids`, `attention_mask`, and `labels` field. 
```py >>> ending_names = ["ending0", "ending1", "ending2", "ending3"] >>> def preprocess_function(examples): ... first_sentences = [[context] * 4 for context in examples["sent1"]] ... question_headers = examples["sent2"] ... second_sentences = [ ... [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers) ... ] ... first_sentences = sum(first_sentences, []) ... second_sentences = sum(second_sentences, []) ... tokenized_examples = tokenizer(first_sentences, second_sentences, truncation=True) ... return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()} ``` To apply the preprocessing function over the entire dataset, use 🤗 Datasets [`~datasets.Dataset.map`] method. You can speed up the `map` function by setting `batched=True` to process multiple elements of the dataset at once: ```py tokenized_swag = swag.map(preprocess_function, batched=True) ``` 🤗 Transformers doesn't have a data collator for multiple choice, so you'll need to adapt the [`DataCollatorWithPadding`] to create a batch of examples. It's more efficient to *dynamically pad* the sentences to the longest length in a batch during collation, instead of padding the whole dataset to the maximum length. `DataCollatorForMultipleChoice` flattens all the model inputs, applies padding, and then unflattens the results: <frameworkcontent> <pt> ```py >>> from dataclasses import dataclass >>> from transformers.tokenization_utils_base import PreTrainedTokenizerBase, PaddingStrategy >>> from typing import Optional, Union >>> import torch >>> @dataclass ... class DataCollatorForMultipleChoice: ... """ ... Data collator that will dynamically pad the inputs for multiple choice received. ... """ ... tokenizer: PreTrainedTokenizerBase ... padding: Union[bool, str, PaddingStrategy] = True ... max_length: Optional[int] = None ... pad_to_multiple_of: Optional[int] = None ... def __call__(self, features): ... label_name = "label" if "label" in features[0].keys() else "labels" ... labels = [feature.pop(label_name) for feature in features] ... batch_size = len(features) ... num_choices = len(features[0]["input_ids"]) ... flattened_features = [ ... [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features ... ] ... flattened_features = sum(flattened_features, []) ... batch = self.tokenizer.pad( ... flattened_features, ... padding=self.padding, ... max_length=self.max_length, ... pad_to_multiple_of=self.pad_to_multiple_of, ... return_tensors="pt", ... ) ... batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()} ... batch["labels"] = torch.tensor(labels, dtype=torch.int64) ... return batch ``` </pt> <tf> ```py >>> from dataclasses import dataclass >>> from transformers.tokenization_utils_base import PreTrainedTokenizerBase, PaddingStrategy >>> from typing import Optional, Union >>> import tensorflow as tf >>> @dataclass ... class DataCollatorForMultipleChoice: ... """ ... Data collator that will dynamically pad the inputs for multiple choice received. ... """ ... tokenizer: PreTrainedTokenizerBase ... padding: Union[bool, str, PaddingStrategy] = True ... max_length: Optional[int] = None ... pad_to_multiple_of: Optional[int] = None ... def __call__(self, features): ... label_name = "label" if "label" in features[0].keys() else "labels" ... labels = [feature.pop(label_name) for feature in features] ... batch_size = len(features) ... num_choices = len(features[0]["input_ids"]) ... flattened_features = [ ... 
[{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features ... ] ... flattened_features = sum(flattened_features, []) ... batch = self.tokenizer.pad( ... flattened_features, ... padding=self.padding, ... max_length=self.max_length, ... pad_to_multiple_of=self.pad_to_multiple_of, ... return_tensors="tf", ... ) ... batch = {k: tf.reshape(v, (batch_size, num_choices, -1)) for k, v in batch.items()} ... batch["labels"] = tf.convert_to_tensor(labels, dtype=tf.int64) ... return batch ``` </tf> </frameworkcontent> ## Evaluate Including a metric during training is often helpful for evaluating your model's performance. You can quickly load a evaluation method with the 🤗 [Evaluate](https://huggingface.co/docs/evaluate/index) library. For this task, load the [accuracy](https://huggingface.co/spaces/evaluate-metric/accuracy) metric (see the 🤗 Evaluate [quick tour](https://huggingface.co/docs/evaluate/a_quick_tour) to learn more about how to load and compute a metric): ```py >>> import evaluate >>> accuracy = evaluate.load("accuracy") ``` Then create a function that passes your predictions and labels to [`~evaluate.EvaluationModule.compute`] to calculate the accuracy: ```py >>> import numpy as np >>> def compute_metrics(eval_pred): ... predictions, labels = eval_pred ... predictions = np.argmax(predictions, axis=1) ... return accuracy.compute(predictions=predictions, references=labels) ``` Your `compute_metrics` function is ready to go now, and you'll return to it when you setup your training. ## Train <frameworkcontent> <pt> <Tip> If you aren't familiar with finetuning a model with the [`Trainer`], take a look at the basic tutorial [here](../training#train-with-pytorch-trainer)! </Tip> You're ready to start training your model now! Load BERT with [`AutoModelForMultipleChoice`]: ```py >>> from transformers import AutoModelForMultipleChoice, TrainingArguments, Trainer >>> model = AutoModelForMultipleChoice.from_pretrained("google-bert/bert-base-uncased") ``` At this point, only three steps remain: 1. Define your training hyperparameters in [`TrainingArguments`]. The only required parameter is `output_dir` which specifies where to save your model. You'll push this model to the Hub by setting `push_to_hub=True` (you need to be signed in to Hugging Face to upload your model). At the end of each epoch, the [`Trainer`] will evaluate the accuracy and save the training checkpoint. 2. Pass the training arguments to [`Trainer`] along with the model, dataset, tokenizer, data collator, and `compute_metrics` function. 3. Call [`~Trainer.train`] to finetune your model. ```py >>> training_args = TrainingArguments( ... output_dir="my_awesome_swag_model", ... eval_strategy="epoch", ... save_strategy="epoch", ... load_best_model_at_end=True, ... learning_rate=5e-5, ... per_device_train_batch_size=16, ... per_device_eval_batch_size=16, ... num_train_epochs=3, ... weight_decay=0.01, ... push_to_hub=True, ... ) >>> trainer = Trainer( ... model=model, ... args=training_args, ... train_dataset=tokenized_swag["train"], ... eval_dataset=tokenized_swag["validation"], ... processing_class=tokenizer, ... data_collator=DataCollatorForMultipleChoice(tokenizer=tokenizer), ... compute_metrics=compute_metrics, ... 
) >>> trainer.train() ``` Once training is completed, share your model to the Hub with the [`~transformers.Trainer.push_to_hub`] method so everyone can use your model: ```py >>> trainer.push_to_hub() ``` </pt> <tf> <Tip> If you aren't familiar with finetuning a model with Keras, take a look at the basic tutorial [here](../training#train-a-tensorflow-model-with-keras)! </Tip> To finetune a model in TensorFlow, start by setting up an optimizer function, learning rate schedule, and some training hyperparameters: ```py >>> from transformers import create_optimizer >>> batch_size = 16 >>> num_train_epochs = 2 >>> total_train_steps = (len(tokenized_swag["train"]) // batch_size) * num_train_epochs >>> optimizer, schedule = create_optimizer(init_lr=5e-5, num_warmup_steps=0, num_train_steps=total_train_steps) ``` Then you can load BERT with [`TFAutoModelForMultipleChoice`]: ```py >>> from transformers import TFAutoModelForMultipleChoice >>> model = TFAutoModelForMultipleChoice.from_pretrained("google-bert/bert-base-uncased") ``` Convert your datasets to the `tf.data.Dataset` format with [`~transformers.TFPreTrainedModel.prepare_tf_dataset`]: ```py >>> data_collator = DataCollatorForMultipleChoice(tokenizer=tokenizer) >>> tf_train_set = model.prepare_tf_dataset( ... tokenized_swag["train"], ... shuffle=True, ... batch_size=batch_size, ... collate_fn=data_collator, ... ) >>> tf_validation_set = model.prepare_tf_dataset( ... tokenized_swag["validation"], ... shuffle=False, ... batch_size=batch_size, ... collate_fn=data_collator, ... ) ``` Configure the model for training with [`compile`](https://keras.io/api/models/model_training_apis/#compile-method). Note that Transformers models all have a default task-relevant loss function, so you don't need to specify one unless you want to: ```py >>> model.compile(optimizer=optimizer) # No loss argument! ``` The last two things to setup before you start training is to compute the accuracy from the predictions, and provide a way to push your model to the Hub. Both are done by using [Keras callbacks](../main_classes/keras_callbacks). Pass your `compute_metrics` function to [`~transformers.KerasMetricCallback`]: ```py >>> from transformers.keras_callbacks import KerasMetricCallback >>> metric_callback = KerasMetricCallback(metric_fn=compute_metrics, eval_dataset=tf_validation_set) ``` Specify where to push your model and tokenizer in the [`~transformers.PushToHubCallback`]: ```py >>> from transformers.keras_callbacks import PushToHubCallback >>> push_to_hub_callback = PushToHubCallback( ... output_dir="my_awesome_model", ... tokenizer=tokenizer, ... ) ``` Then bundle your callbacks together: ```py >>> callbacks = [metric_callback, push_to_hub_callback] ``` Finally, you're ready to start training your model! Call [`fit`](https://keras.io/api/models/model_training_apis/#fit-method) with your training and validation datasets, the number of epochs, and your callbacks to finetune the model: ```py >>> model.fit(x=tf_train_set, validation_data=tf_validation_set, epochs=2, callbacks=callbacks) ``` Once training is completed, your model is automatically uploaded to the Hub so everyone can use it! 
</tf>
</frameworkcontent>

<Tip>

For a more in-depth example of how to finetune a model for multiple choice, take a look at the corresponding [PyTorch notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/multiple_choice.ipynb) or [TensorFlow notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/multiple_choice-tf.ipynb).

</Tip>

## Inference

Great, now that you've finetuned a model, you can use it for inference!

Come up with some text and two candidate answers:

```py
>>> prompt = "France has a bread law, Le Décret Pain, with strict rules on what is allowed in a traditional baguette."
>>> candidate1 = "The law does not apply to croissants and brioche."
>>> candidate2 = "The law applies to baguettes."
```

<frameworkcontent>
<pt>
Tokenize each prompt and candidate answer pair and return PyTorch tensors. You should also create some `labels`:

```py
>>> import torch
>>> from transformers import AutoTokenizer

>>> tokenizer = AutoTokenizer.from_pretrained("username/my_awesome_swag_model")
>>> inputs = tokenizer([[prompt, candidate1], [prompt, candidate2]], return_tensors="pt", padding=True)
>>> labels = torch.tensor(0).unsqueeze(0)
```

Pass your inputs and labels to the model and return the `logits`:

```py
>>> from transformers import AutoModelForMultipleChoice

>>> model = AutoModelForMultipleChoice.from_pretrained("username/my_awesome_swag_model")
>>> outputs = model(**{k: v.unsqueeze(0) for k, v in inputs.items()}, labels=labels)
>>> logits = outputs.logits
```

Get the class with the highest probability:

```py
>>> predicted_class = logits.argmax().item()
>>> predicted_class
0
```
</pt>
<tf>
Tokenize each prompt and candidate answer pair and return TensorFlow tensors:

```py
>>> from transformers import AutoTokenizer

>>> tokenizer = AutoTokenizer.from_pretrained("username/my_awesome_swag_model")
>>> inputs = tokenizer([[prompt, candidate1], [prompt, candidate2]], return_tensors="tf", padding=True)
```

Pass your inputs to the model and return the `logits`:

```py
>>> import tensorflow as tf
>>> from transformers import TFAutoModelForMultipleChoice

>>> model = TFAutoModelForMultipleChoice.from_pretrained("username/my_awesome_swag_model")
>>> inputs = {k: tf.expand_dims(v, 0) for k, v in inputs.items()}
>>> outputs = model(inputs)
>>> logits = outputs.logits
```

Get the class with the highest probability:

```py
>>> predicted_class = int(tf.math.argmax(logits, axis=-1)[0])
>>> predicted_class
0
```
</tf>
</frameworkcontent>
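In either framework, the predicted index follows the order in which the candidate answers were tokenized, so mapping it back to a human-readable answer is just a list lookup (a small follow-up sketch, reusing the variables defined above):

```py
>>> candidates = [candidate1, candidate2]
>>> candidates[predicted_class]
'The law does not apply to croissants and brioche.'
```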
transformers/docs/source/en/tasks/multiple_choice.md/0
{ "file_path": "transformers/docs/source/en/tasks/multiple_choice.md", "repo_id": "transformers", "token_count": 5495 }
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Testing Let's take a look at how 🤗 Transformers models are tested and how you can write new tests and improve the existing ones. There are 2 test suites in the repository: 1. `tests` -- tests for the general API 2. `examples` -- tests primarily for various applications that aren't part of the API ## How transformers are tested 1. Once a PR is submitted it gets tested with 9 CircleCi jobs. Every new commit to that PR gets retested. These jobs are defined in this [config file](https://github.com/huggingface/transformers/tree/main/.circleci/config.yml), so that if needed you can reproduce the same environment on your machine. These CI jobs don't run `@slow` tests. 2. There are 3 jobs run by [github actions](https://github.com/huggingface/transformers/actions): - [torch hub integration](https://github.com/huggingface/transformers/tree/main/.github/workflows/github-torch-hub.yml): checks whether torch hub integration works. - [self-hosted (push)](https://github.com/huggingface/transformers/tree/main/.github/workflows/self-push.yml): runs fast tests on GPU only on commits on `main`. It only runs if a commit on `main` has updated the code in one of the following folders: `src`, `tests`, `.github` (to prevent running on added model cards, notebooks, etc.) - [self-hosted runner](https://github.com/huggingface/transformers/tree/main/.github/workflows/self-scheduled.yml): runs normal and slow tests on GPU in `tests` and `examples`: ```bash RUN_SLOW=1 pytest tests/ RUN_SLOW=1 pytest examples/ ``` The results can be observed [here](https://github.com/huggingface/transformers/actions). ## Running tests ### Choosing which tests to run This document goes into many details of how tests can be run. If after reading everything, you need even more details you will find them [here](https://docs.pytest.org/en/latest/usage.html). Here are some most useful ways of running tests. Run all: ```console pytest ``` or: ```bash make test ``` Note that the latter is defined as: ```bash python -m pytest -n auto --dist=loadfile -s -v ./tests/ ``` which tells pytest to: - run as many test processes as they are CPU cores (which could be too many if you don't have a ton of RAM!) - ensure that all tests from the same file will be run by the same test process - do not capture output - run in verbose mode ### Getting the list of all tests All tests of the test suite: ```bash pytest --collect-only -q ``` All tests of a given test file: ```bash pytest tests/test_optimization.py --collect-only -q ``` ### Run a specific test module To run an individual test module: ```bash pytest tests/utils/test_logging.py ``` ### Run specific tests Since unittest is used inside most of the tests, to run specific subtests you need to know the name of the unittest class containing those tests. 
For example, it could be: ```bash pytest tests/test_optimization.py::OptimizationTest::test_adam_w ``` Here: - `tests/test_optimization.py` - the file with tests - `OptimizationTest` - the name of the class - `test_adam_w` - the name of the specific test function If the file contains multiple classes, you can choose to run only tests of a given class. For example: ```bash pytest tests/test_optimization.py::OptimizationTest ``` will run all the tests inside that class. As mentioned earlier you can see what tests are contained inside the `OptimizationTest` class by running: ```bash pytest tests/test_optimization.py::OptimizationTest --collect-only -q ``` You can run tests by keyword expressions. To run only tests whose name contains `adam`: ```bash pytest -k adam tests/test_optimization.py ``` Logical `and` and `or` can be used to indicate whether all keywords should match or either. `not` can be used to negate. To run all tests except those whose name contains `adam`: ```bash pytest -k "not adam" tests/test_optimization.py ``` And you can combine the two patterns in one: ```bash pytest -k "ada and not adam" tests/test_optimization.py ``` For example to run both `test_adafactor` and `test_adam_w` you can use: ```bash pytest -k "test_adafactor or test_adam_w" tests/test_optimization.py ``` Note that we use `or` here, since we want either of the keywords to match to include both. If you want to include only tests that include both patterns, `and` is to be used: ```bash pytest -k "test and ada" tests/test_optimization.py ``` ### Run `accelerate` tests Sometimes you need to run `accelerate` tests on your models. For that you can just add `-m accelerate_tests` to your command, if let's say you want to run these tests on `OPT` run: ```bash RUN_SLOW=1 pytest -m accelerate_tests tests/models/opt/test_modeling_opt.py ``` ### Run documentation tests In order to test whether the documentation examples are correct, you should check that the `doctests` are passing. As an example, let's use [`WhisperModel.forward`'s docstring](https://github.com/huggingface/transformers/blob/1124d95dbb1a3512d3e80791d73d0f541d1d7e9f/src/transformers/models/whisper/modeling_whisper.py#L1591-L1609) ```python r""" Returns: Example: ```python >>> import torch >>> from transformers import WhisperModel, WhisperFeatureExtractor >>> from datasets import load_dataset >>> model = WhisperModel.from_pretrained("openai/whisper-base") >>> feature_extractor = WhisperFeatureExtractor.from_pretrained("openai/whisper-base") >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> inputs = feature_extractor(ds[0]["audio"]["array"], return_tensors="pt") >>> input_features = inputs.input_features >>> decoder_input_ids = torch.tensor([[1, 1]]) * model.config.decoder_start_token_id >>> last_hidden_state = model(input_features, decoder_input_ids=decoder_input_ids).last_hidden_state >>> list(last_hidden_state.shape) [1, 2, 512] ```""" ``` Just run the following line to automatically test every docstring example in the desired file: ```bash pytest --doctest-modules <path_to_file_or_dir> ``` If the file has a markdown extention, you should add the `--doctest-glob="*.md"` argument. ### Run only modified tests You can run the tests related to the unstaged files or the current branch (according to Git) by using [pytest-picked](https://github.com/anapaulagomes/pytest-picked). 
This is a great way of quickly testing that your changes didn't break anything, since it won't run the tests related to files you didn't touch.

```bash
pip install pytest-picked
```

```bash
pytest --picked
```

All tests will be run from files and folders which are modified, but not yet committed.

### Automatically rerun failed tests on source modification

[pytest-xdist](https://github.com/pytest-dev/pytest-xdist) provides a very useful feature of detecting all failed tests, and then waiting for you to modify files and continuously re-running those failing tests until they pass while you fix them, so you don't need to restart pytest after you make a fix. This is repeated until all tests pass, after which a full run is performed again.

```bash
pip install pytest-xdist
```

To enter the mode: `pytest -f` or `pytest --looponfail`

File changes are detected by looking at `looponfailroots` root directories and all of their contents (recursively). If the default for this value does not work for you, you can change it in your project by setting a configuration option in `setup.cfg`:

```ini
[tool:pytest]
looponfailroots = transformers tests
```

or in `pytest.ini`/`tox.ini` files:

```ini
[pytest]
looponfailroots = transformers tests
```

This would lead to only looking for file changes in the respective directories, specified relative to the ini-file's directory.

[pytest-watch](https://github.com/joeyespo/pytest-watch) is an alternative implementation of this functionality.

### Skip a test module

If you want to run all test modules except a few, you can exclude them by giving an explicit list of tests to run. For example, to run all except `test_modeling_*.py` tests:

```bash
pytest $(ls -1 tests/*py | grep -v test_modeling)
```

### Clearing state

In CI builds, and whenever isolation is important (at the expense of speed), the cache should be cleared:

```bash
pytest --cache-clear tests
```

### Running tests in parallel

As mentioned earlier, `make test` runs tests in parallel via the `pytest-xdist` plugin (`-n X` argument, e.g. `-n 2` to run 2 parallel jobs).

`pytest-xdist`'s `--dist=` option allows one to control how the tests are grouped. `--dist=loadfile` puts the tests located in one file onto the same process.

Since the order of executed tests is different and unpredictable, if running the test suite with `pytest-xdist` produces failures (meaning we have some undetected coupled tests), use [pytest-replay](https://github.com/ESSS/pytest-replay) to replay the tests in the same order, which should help reduce that failing sequence to a minimum.

### Test order and repetition

It's good to repeat the tests several times, in sequence, randomly, or in sets, to detect any potential inter-dependency and state-related bugs (tear down). Straightforward repetition is also useful for uncovering problems that only surface due to the inherent randomness of deep learning.

#### Repeat tests

- [pytest-flakefinder](https://github.com/dropbox/pytest-flakefinder):

```bash
pip install pytest-flakefinder
```

And then run every test multiple times (50 by default):

```bash
pytest --flake-finder --flake-runs=5 tests/test_failing_test.py
```

<Tip>

This plugin doesn't work with the `-n` flag from `pytest-xdist`.

</Tip>

<Tip>

There is another plugin, `pytest-repeat`, but it doesn't work with `unittest`.

</Tip>

#### Run tests in a random order

```bash
pip install pytest-random-order
```

Important: the presence of `pytest-random-order` will automatically randomize tests; no configuration change or command line options are required.
As explained earlier this allows detection of coupled tests - where one test's state affects the state of another. When `pytest-random-order` is installed it will print the random seed it used for that session, e.g: ```bash pytest tests [...] Using --random-order-bucket=module Using --random-order-seed=573663 ``` So that if the given particular sequence fails, you can reproduce it by adding that exact seed, e.g.: ```bash pytest --random-order-seed=573663 [...] Using --random-order-bucket=module Using --random-order-seed=573663 ``` It will only reproduce the exact order if you use the exact same list of tests (or no list at all). Once you start to manually narrowing down the list you can no longer rely on the seed, but have to list them manually in the exact order they failed and tell pytest to not randomize them instead using `--random-order-bucket=none`, e.g.: ```bash pytest --random-order-bucket=none tests/test_a.py tests/test_c.py tests/test_b.py ``` To disable the shuffling for all tests: ```bash pytest --random-order-bucket=none ``` By default `--random-order-bucket=module` is implied, which will shuffle the files on the module levels. It can also shuffle on `class`, `package`, `global` and `none` levels. For the complete details please see its [documentation](https://github.com/jbasko/pytest-random-order). Another randomization alternative is: [`pytest-randomly`](https://github.com/pytest-dev/pytest-randomly). This module has a very similar functionality/interface, but it doesn't have the bucket modes available in `pytest-random-order`. It has the same problem of imposing itself once installed. ### Look and feel variations #### pytest-sugar [pytest-sugar](https://github.com/Frozenball/pytest-sugar) is a plugin that improves the look-n-feel, adds a progressbar, and show tests that fail and the assert instantly. It gets activated automatically upon installation. ```bash pip install pytest-sugar ``` To run tests without it, run: ```bash pytest -p no:sugar ``` or uninstall it. #### Report each sub-test name and its progress For a single or a group of tests via `pytest` (after `pip install pytest-pspec`): ```bash pytest --pspec tests/test_optimization.py ``` #### Instantly shows failed tests [pytest-instafail](https://github.com/pytest-dev/pytest-instafail) shows failures and errors instantly instead of waiting until the end of test session. ```bash pip install pytest-instafail ``` ```bash pytest --instafail ``` ### To GPU or not to GPU On a GPU-enabled setup, to test in CPU-only mode add `CUDA_VISIBLE_DEVICES=""` for CUDA GPUs: ```bash CUDA_VISIBLE_DEVICES="" pytest tests/utils/test_logging.py ``` or if you have multiple gpus, you can specify which one is to be used by `pytest`. For example, to use only the second gpu if you have gpus `0` and `1`, you can run: ```bash CUDA_VISIBLE_DEVICES="1" pytest tests/utils/test_logging.py ``` For Intel GPUs, use `ZE_AFFINITY_MASK` instead of `CUDA_VISIBLE_DEVICES` in the above example. This is handy when you want to run different tasks on different GPUs. Some tests must be run on CPU-only, others on either CPU or GPU or TPU, yet others on multiple-GPUs. 
The following skip decorators are used to set the requirements of tests CPU/GPU/XPU/TPU-wise: - `require_torch` - this test will run only under torch - `require_torch_gpu` - as `require_torch` plus requires at least 1 GPU - `require_torch_multi_gpu` - as `require_torch` plus requires at least 2 GPUs - `require_torch_non_multi_gpu` - as `require_torch` plus requires 0 or 1 GPUs - `require_torch_up_to_2_gpus` - as `require_torch` plus requires 0 or 1 or 2 GPUs - `require_torch_xla` - as `require_torch` plus requires at least 1 TPU Let's depict the GPU requirements in the following table: | n gpus | decorator | |--------|--------------------------------| | `>= 0` | `@require_torch` | | `>= 1` | `@require_torch_gpu` | | `>= 2` | `@require_torch_multi_gpu` | | `< 2` | `@require_torch_non_multi_gpu` | | `< 3` | `@require_torch_up_to_2_gpus` | For example, here is a test that must be run only when there are 2 or more GPUs available and pytorch is installed: ```python no-style @require_torch_multi_gpu def test_example_with_multi_gpu(): ``` If a test requires `tensorflow` use the `require_tf` decorator. For example: ```python no-style @require_tf def test_tf_thing_with_tensorflow(): ``` These decorators can be stacked. For example, if a test is slow and requires at least one GPU under pytorch, here is how to set it up: ```python no-style @require_torch_gpu @slow def test_example_slow_on_gpu(): ``` Some decorators like `@parametrized` rewrite test names, therefore `@require_*` skip decorators have to be listed last for them to work correctly. Here is an example of the correct usage: ```python no-style @parameterized.expand(...) @require_torch_multi_gpu def test_integration_foo(): ``` This order problem doesn't exist with `@pytest.mark.parametrize`, you can put it first or last and it will still work. But it only works with non-unittests. Inside tests: - How many GPUs are available: ```python from transformers.testing_utils import get_gpu_count n_gpu = get_gpu_count() # works with torch and tf ``` ### Testing with a specific PyTorch backend or device To run the test suite on a specific torch device add `TRANSFORMERS_TEST_DEVICE="$device"` where `$device` is the target backend. For example, to test on CPU only: ```bash TRANSFORMERS_TEST_DEVICE="cpu" pytest tests/utils/test_logging.py ``` This variable is useful for testing custom or less common PyTorch backends such as `mps`, `xpu` or `npu`. It can also be used to achieve the same effect as `CUDA_VISIBLE_DEVICES` by targeting specific GPUs or testing in CPU-only mode. Certain devices will require an additional import after importing `torch` for the first time. This can be specified using the environment variable `TRANSFORMERS_TEST_BACKEND`: ```bash TRANSFORMERS_TEST_BACKEND="torch_npu" pytest tests/utils/test_logging.py ``` Alternative backends may also require the replacement of device-specific functions. For example `torch.cuda.manual_seed` may need to be replaced with a device-specific seed setter like `torch.npu.manual_seed` or `torch.xpu.manual_seed` to correctly set a random seed on the device. To specify a new backend with backend-specific device functions when running the test suite, create a Python device specification file `spec.py` in the format: ```python import torch import torch_npu # for xpu, replace it with `import intel_extension_for_pytorch` # !! Further additional imports can be added here !! # Specify the device name (eg. 'cuda', 'cpu', 'npu', 'xpu', 'mps') DEVICE_NAME = 'npu' # Specify device-specific backends to dispatch to. 
# If not specified, will fall back to 'default' in `testing_utils.py`
MANUAL_SEED_FN = torch.npu.manual_seed
EMPTY_CACHE_FN = torch.npu.empty_cache
DEVICE_COUNT_FN = torch.npu.device_count
```

This format also allows for specification of any additional imports required. To use this file to replace equivalent methods in the test suite, set the environment variable `TRANSFORMERS_TEST_DEVICE_SPEC` to the path of the spec file, e.g. `TRANSFORMERS_TEST_DEVICE_SPEC=spec.py`.

Currently, only `MANUAL_SEED_FN`, `EMPTY_CACHE_FN` and `DEVICE_COUNT_FN` are supported for device-specific dispatch.

### Distributed training

`pytest` can't deal with distributed training directly. If this is attempted, the sub-processes don't do the right thing and end up thinking they are `pytest` and start running the test suite in loops. It works, however, if one spawns a normal process that then spawns off multiple workers and manages the IO pipes.

Here are some tests that use it:

- [test_trainer_distributed.py](https://github.com/huggingface/transformers/tree/main/tests/trainer/test_trainer_distributed.py)
- [test_deepspeed.py](https://github.com/huggingface/transformers/tree/main/tests/deepspeed/test_deepspeed.py)

To jump right into the execution point, search for the `execute_subprocess_async` call in those tests.

You will need at least 2 GPUs to see these tests in action:

```bash
CUDA_VISIBLE_DEVICES=0,1 RUN_SLOW=1 pytest -sv tests/test_trainer_distributed.py
```

### Output capture

During test execution any output sent to `stdout` and `stderr` is captured. If a test or a setup method fails, its corresponding captured output will usually be shown along with the failure traceback.

To disable output capturing and to get the `stdout` and `stderr` normally, use `-s` or `--capture=no`:

```bash
pytest -s tests/utils/test_logging.py
```

To send test results to JUnit format output:

```bash
pytest tests --junitxml=result.xml
```

### Color control

To have no color (e.g., yellow on a white background is not readable):

```bash
pytest --color=no tests/utils/test_logging.py
```

### Sending test report to online pastebin service

Creating a URL for each test failure:

```bash
pytest --pastebin=failed tests/utils/test_logging.py
```

This will submit test run information to a remote Paste service and provide a URL for each failure. You may select tests as usual, or add `-x` for example, if you only want to send one particular failure.

Creating a URL for a whole test session log:

```bash
pytest --pastebin=all tests/utils/test_logging.py
```

## Writing tests

🤗 transformers tests are based on `unittest`, but run by `pytest`, so most of the time features from both systems can be used.

You can read [here](https://docs.pytest.org/en/stable/unittest.html) which features are supported, but the important thing to remember is that most `pytest` fixtures don't work. Parametrization doesn't work either, but we use the `parameterized` module, which works in a similar way.

### Parametrization

Often, there is a need to run the same test multiple times, but with different arguments. It could be done from within the test, but then there is no way of running that test for just one set of arguments.
```python # test_this1.py import unittest from parameterized import parameterized class TestMathUnitTest(unittest.TestCase): @parameterized.expand( [ ("negative", -1.5, -2.0), ("integer", 1, 1.0), ("large fraction", 1.6, 1), ] ) def test_floor(self, name, input, expected): assert_equal(math.floor(input), expected) ``` Now, by default this test will be run 3 times, each time with the last 3 arguments of `test_floor` being assigned the corresponding arguments in the parameter list. and you could run just the `negative` and `integer` sets of params with: ```bash pytest -k "negative and integer" tests/test_mytest.py ``` or all but `negative` sub-tests, with: ```bash pytest -k "not negative" tests/test_mytest.py ``` Besides using the `-k` filter that was just mentioned, you can find out the exact name of each sub-test and run any or all of them using their exact names. ```bash pytest test_this1.py --collect-only -q ``` and it will list: ```bash test_this1.py::TestMathUnitTest::test_floor_0_negative test_this1.py::TestMathUnitTest::test_floor_1_integer test_this1.py::TestMathUnitTest::test_floor_2_large_fraction ``` So now you can run just 2 specific sub-tests: ```bash pytest test_this1.py::TestMathUnitTest::test_floor_0_negative test_this1.py::TestMathUnitTest::test_floor_1_integer ``` The module [parameterized](https://pypi.org/project/parameterized/) which is already in the developer dependencies of `transformers` works for both: `unittests` and `pytest` tests. If, however, the test is not a `unittest`, you may use `pytest.mark.parametrize` (or you may see it being used in some existing tests, mostly under `examples`). Here is the same example, this time using `pytest`'s `parametrize` marker: ```python # test_this2.py import pytest @pytest.mark.parametrize( "name, input, expected", [ ("negative", -1.5, -2.0), ("integer", 1, 1.0), ("large fraction", 1.6, 1), ], ) def test_floor(name, input, expected): assert_equal(math.floor(input), expected) ``` Same as with `parameterized`, with `pytest.mark.parametrize` you can have a fine control over which sub-tests are run, if the `-k` filter doesn't do the job. Except, this parametrization function creates a slightly different set of names for the sub-tests. Here is what they look like: ```bash pytest test_this2.py --collect-only -q ``` and it will list: ```bash test_this2.py::test_floor[integer-1-1.0] test_this2.py::test_floor[negative--1.5--2.0] test_this2.py::test_floor[large fraction-1.6-1] ``` So now you can run just the specific test: ```bash pytest test_this2.py::test_floor[negative--1.5--2.0] test_this2.py::test_floor[integer-1-1.0] ``` as in the previous example. ### Files and directories In tests often we need to know where things are relative to the current test file, and it's not trivial since the test could be invoked from more than one directory or could reside in sub-directories with different depths. A helper class `transformers.test_utils.TestCasePlus` solves this problem by sorting out all the basic paths and provides easy accessors to them: - `pathlib` objects (all fully resolved): - `test_file_path` - the current test file path, i.e. `__file__` - `test_file_dir` - the directory containing the current test file - `tests_dir` - the directory of the `tests` test suite - `examples_dir` - the directory of the `examples` test suite - `repo_root_dir` - the directory of the repository - `src_dir` - the directory of `src` (i.e. 
where the `transformers` sub-dir resides) - stringified paths---same as above but these return paths as strings, rather than `pathlib` objects: - `test_file_path_str` - `test_file_dir_str` - `tests_dir_str` - `examples_dir_str` - `repo_root_dir_str` - `src_dir_str` To start using those all you need is to make sure that the test resides in a subclass of `transformers.test_utils.TestCasePlus`. For example: ```python from transformers.testing_utils import TestCasePlus class PathExampleTest(TestCasePlus): def test_something_involving_local_locations(self): data_dir = self.tests_dir / "fixtures/tests_samples/wmt_en_ro" ``` If you don't need to manipulate paths via `pathlib` or you just need a path as a string, you can always invoked `str()` on the `pathlib` object or use the accessors ending with `_str`. For example: ```python from transformers.testing_utils import TestCasePlus class PathExampleTest(TestCasePlus): def test_something_involving_stringified_locations(self): examples_dir = self.examples_dir_str ``` ### Temporary files and directories Using unique temporary files and directories are essential for parallel test running, so that the tests won't overwrite each other's data. Also we want to get the temporary files and directories removed at the end of each test that created them. Therefore, using packages like `tempfile`, which address these needs is essential. However, when debugging tests, you need to be able to see what goes into the temporary file or directory and you want to know it's exact path and not having it randomized on every test re-run. A helper class `transformers.test_utils.TestCasePlus` is best used for such purposes. It's a sub-class of `unittest.TestCase`, so we can easily inherit from it in the test modules. Here is an example of its usage: ```python from transformers.testing_utils import TestCasePlus class ExamplesTests(TestCasePlus): def test_whatever(self): tmp_dir = self.get_auto_remove_tmp_dir() ``` This code creates a unique temporary directory, and sets `tmp_dir` to its location. - Create a unique temporary dir: ```python def test_whatever(self): tmp_dir = self.get_auto_remove_tmp_dir() ``` `tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the test. - Create a temporary dir of my choice, ensure it's empty before the test starts and don't empty it after the test. ```python def test_whatever(self): tmp_dir = self.get_auto_remove_tmp_dir("./xxx") ``` This is useful for debug when you want to monitor a specific directory and want to make sure the previous tests didn't leave any data in there. - You can override the default behavior by directly overriding the `before` and `after` args, leading to one of the following behaviors: - `before=True`: the temporary dir will always be cleared at the beginning of the test. - `before=False`: if the temporary dir already existed, any existing files will remain there. - `after=True`: the temporary dir will always be deleted at the end of the test. - `after=False`: the temporary dir will always be left intact at the end of the test. <Tip> In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are allowed if an explicit `tmp_dir` is used, so that by mistake no `/tmp` or similar important part of the filesystem will get nuked. i.e. please always pass paths that start with `./`. </Tip> <Tip> Each test can register multiple temporary directories and they all will get auto-removed, unless requested otherwise. 
</Tip> ### Temporary sys.path override If you need to temporary override `sys.path` to import from another test for example, you can use the `ExtendSysPath` context manager. Example: ```python import os from transformers.testing_utils import ExtendSysPath bindir = os.path.abspath(os.path.dirname(__file__)) with ExtendSysPath(f"{bindir}/.."): from test_trainer import TrainerIntegrationCommon # noqa ``` ### Skipping tests This is useful when a bug is found and a new test is written, yet the bug is not fixed yet. In order to be able to commit it to the main repository we need make sure it's skipped during `make test`. Methods: - A **skip** means that you expect your test to pass only if some conditions are met, otherwise pytest should skip running the test altogether. Common examples are skipping windows-only tests on non-windows platforms, or skipping tests that depend on an external resource which is not available at the moment (for example a database). - A **xfail** means that you expect a test to fail for some reason. A common example is a test for a feature not yet implemented, or a bug not yet fixed. When a test passes despite being expected to fail (marked with pytest.mark.xfail), it’s an xpass and will be reported in the test summary. One of the important differences between the two is that `skip` doesn't run the test, and `xfail` does. So if the code that's buggy causes some bad state that will affect other tests, do not use `xfail`. #### Implementation - Here is how to skip whole test unconditionally: ```python no-style @unittest.skip(reason="this bug needs to be fixed") def test_feature_x(): ``` or via pytest: ```python no-style @pytest.mark.skip(reason="this bug needs to be fixed") ``` or the `xfail` way: ```python no-style @pytest.mark.xfail def test_feature_x(): ``` Here's how to skip a test based on internal checks within the test: ```python def test_feature_x(): if not has_something(): pytest.skip("unsupported configuration") ``` or the whole module: ```python import pytest if not pytest.config.getoption("--custom-flag"): pytest.skip("--custom-flag is missing, skipping tests", allow_module_level=True) ``` or the `xfail` way: ```python def test_feature_x(): pytest.xfail("expected to fail until bug XYZ is fixed") ``` - Here is how to skip all tests in a module if some import is missing: ```python docutils = pytest.importorskip("docutils", minversion="0.3") ``` - Skip a test based on a condition: ```python no-style @pytest.mark.skipif(sys.version_info < (3,6), reason="requires python3.6 or higher") def test_feature_x(): ``` or: ```python no-style @unittest.skipIf(torch_device == "cpu", "Can't do half precision") def test_feature_x(): ``` or skip the whole module: ```python no-style @pytest.mark.skipif(sys.platform == 'win32', reason="does not run on windows") class TestClass(): def test_feature_x(self): ``` More details, example and ways are [here](https://docs.pytest.org/en/latest/skipping.html). ### Slow tests The library of tests is ever-growing, and some of the tests take minutes to run, therefore we can't afford waiting for an hour for the test suite to complete on CI. 
Therefore, with some exceptions for essential tests, slow tests should be marked as in the example below: ```python no-style from transformers.testing_utils import slow @slow def test_integration_foo(): ``` Once a test is marked as `@slow`, to run such tests set `RUN_SLOW=1` env var, e.g.: ```bash RUN_SLOW=1 pytest tests ``` Some decorators like `@parameterized` rewrite test names, therefore `@slow` and the rest of the skip decorators `@require_*` have to be listed last for them to work correctly. Here is an example of the correct usage: ```python no-style @parameterized.expand(...) @slow def test_integration_foo(): ``` As explained at the beginning of this document, slow tests get to run on a scheduled basis, rather than in PRs CI checks. So it's possible that some problems will be missed during a PR submission and get merged. Such problems will get caught during the next scheduled CI job. But it also means that it's important to run the slow tests on your machine before submitting the PR. Here is a rough decision making mechanism for choosing which tests should be marked as slow: If the test is focused on one of the library's internal components (e.g., modeling files, tokenization files, pipelines), then we should run that test in the non-slow test suite. If it's focused on an other aspect of the library, such as the documentation or the examples, then we should run these tests in the slow test suite. And then, to refine this approach we should have exceptions: - All tests that need to download a heavy set of weights or a dataset that is larger than ~50MB (e.g., model or tokenizer integration tests, pipeline integration tests) should be set to slow. If you're adding a new model, you should create and upload to the hub a tiny version of it (with random weights) for integration tests. This is discussed in the following paragraphs. - All tests that need to do a training not specifically optimized to be fast should be set to slow. - We can introduce exceptions if some of these should-be-non-slow tests are excruciatingly slow, and set them to `@slow`. Auto-modeling tests, which save and load large files to disk, are a good example of tests that are marked as `@slow`. - If a test completes under 1 second on CI (including downloads if any) then it should be a normal test regardless. Collectively, all the non-slow tests need to cover entirely the different internals, while remaining fast. For example, a significant coverage can be achieved by testing with specially created tiny models with random weights. Such models have the very minimal number of layers (e.g., 2), vocab size (e.g., 1000), etc. Then the `@slow` tests can use large slow models to do qualitative testing. To see the use of these simply look for *tiny* models with: ```bash grep tiny tests examples ``` Here is an example of a [script](https://github.com/huggingface/transformers/tree/main/scripts/fsmt/fsmt-make-tiny-model.py) that created the tiny model [stas/tiny-wmt19-en-de](https://huggingface.co/stas/tiny-wmt19-en-de). You can easily adjust it to your specific model's architecture. It's easy to measure the run-time incorrectly if for example there is an overheard of downloading a huge model, but if you test it locally the downloaded files would be cached and thus the download time not measured. Hence check the execution speed report in CI logs instead (the output of `pytest --durations=0 tests`). That report is also useful to find slow outliers that aren't marked as such, or which need to be re-written to be fast. 
If you notice that the test suite starts getting slow on CI, the top listing of this report will show the slowest tests. ### Testing the stdout/stderr output In order to test functions that write to `stdout` and/or `stderr`, the test can access those streams using the `pytest`'s [capsys system](https://docs.pytest.org/en/latest/capture.html). Here is how this is accomplished: ```python import sys def print_to_stdout(s): print(s) def print_to_stderr(s): sys.stderr.write(s) def test_result_and_stdout(capsys): msg = "Hello" print_to_stdout(msg) print_to_stderr(msg) out, err = capsys.readouterr() # consume the captured output streams # optional: if you want to replay the consumed streams: sys.stdout.write(out) sys.stderr.write(err) # test: assert msg in out assert msg in err ``` And, of course, most of the time, `stderr` will come as a part of an exception, so try/except has to be used in such a case: ```python def raise_exception(msg): raise ValueError(msg) def test_something_exception(): msg = "Not a good value" error = "" try: raise_exception(msg) except Exception as e: error = str(e) assert msg in error, f"{msg} is in the exception:\n{error}" ``` Another approach to capturing stdout is via `contextlib.redirect_stdout`: ```python from io import StringIO from contextlib import redirect_stdout def print_to_stdout(s): print(s) def test_result_and_stdout(): msg = "Hello" buffer = StringIO() with redirect_stdout(buffer): print_to_stdout(msg) out = buffer.getvalue() # optional: if you want to replay the consumed streams: sys.stdout.write(out) # test: assert msg in out ``` An important potential issue with capturing stdout is that it may contain `\r` characters that in normal `print` reset everything that has been printed so far. There is no problem with `pytest`, but with `pytest -s` these characters get included in the buffer, so to be able to have the test run with and without `-s`, you have to make an extra cleanup to the captured output, using `re.sub(r'~.*\r', '', buf, 0, re.M)`. But, then we have a helper context manager wrapper to automatically take care of it all, regardless of whether it has some `\r`'s in it or not, so it's a simple: ```python from transformers.testing_utils import CaptureStdout with CaptureStdout() as cs: function_that_writes_to_stdout() print(cs.out) ``` Here is a full test example: ```python from transformers.testing_utils import CaptureStdout msg = "Secret message\r" final = "Hello World" with CaptureStdout() as cs: print(msg + final) assert cs.out == final + "\n", f"captured: {cs.out}, expecting {final}" ``` If you'd like to capture `stderr` use the `CaptureStderr` class instead: ```python from transformers.testing_utils import CaptureStderr with CaptureStderr() as cs: function_that_writes_to_stderr() print(cs.err) ``` If you need to capture both streams at once, use the parent `CaptureStd` class: ```python from transformers.testing_utils import CaptureStd with CaptureStd() as cs: function_that_writes_to_stdout_and_stderr() print(cs.err, cs.out) ``` Also, to aid debugging test issues, by default these context managers automatically replay the captured streams on exit from the context. 
### Capturing logger stream If you need to validate the output of a logger, you can use `CaptureLogger`: ```python from transformers import logging from transformers.testing_utils import CaptureLogger msg = "Testing 1, 2, 3" logging.set_verbosity_info() logger = logging.get_logger("transformers.models.bart.tokenization_bart") with CaptureLogger(logger) as cl: logger.info(msg) assert cl.out, msg + "\n" ``` ### Testing with environment variables If you want to test the impact of environment variables for a specific test you can use a helper decorator `transformers.testing_utils.mockenv` ```python from transformers.testing_utils import mockenv class HfArgumentParserTest(unittest.TestCase): @mockenv(TRANSFORMERS_VERBOSITY="error") def test_env_override(self): env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None) ``` At times an external program needs to be called, which requires setting `PYTHONPATH` in `os.environ` to include multiple local paths. A helper class `transformers.test_utils.TestCasePlus` comes to help: ```python from transformers.testing_utils import TestCasePlus class EnvExampleTest(TestCasePlus): def test_external_prog(self): env = self.get_env() # now call the external program, passing `env` to it ``` Depending on whether the test file was under the `tests` test suite or `examples` it'll correctly set up `env[PYTHONPATH]` to include one of these two directories, and also the `src` directory to ensure the testing is done against the current repo, and finally with whatever `env[PYTHONPATH]` was already set to before the test was called if anything. This helper method creates a copy of the `os.environ` object, so the original remains intact. ### Getting reproducible results In some situations you may want to remove randomness for your tests. To get identical reproducible results set, you will need to fix the seed: ```python seed = 42 # python RNG import random random.seed(seed) # pytorch RNGs import torch torch.manual_seed(seed) torch.backends.cudnn.deterministic = True if torch.cuda.is_available(): torch.cuda.manual_seed_all(seed) # numpy RNG import numpy as np np.random.seed(seed) # tf RNG import tensorflow as tf tf.random.set_seed(seed) ``` ### Debugging tests To start a debugger at the point of the warning, do this: ```bash pytest tests/utils/test_logging.py -W error::UserWarning --pdb ``` ## Working with github actions workflows To trigger a self-push workflow CI job, you must: 1. Create a new branch on `transformers` origin (not a fork!). 2. The branch name has to start with either `ci_` or `ci-` (`main` triggers it too, but we can't do PRs on `main`). It also gets triggered only for specific paths - you can find the up-to-date definition in case it changed since this document has been written [here](https://github.com/huggingface/transformers/blob/main/.github/workflows/self-push.yml) under *push:* 3. Create a PR from this branch. 4. Then you can see the job appear [here](https://github.com/huggingface/transformers/actions/workflows/self-push.yml). It may not run right away if there is a backlog. ## Testing Experimental CI Features Testing CI features can be potentially problematic as it can interfere with the normal CI functioning. Therefore if a new CI feature is to be added, it should be done as following. 1. Create a new dedicated job that tests what needs to be tested 2. The new job must always succeed so that it gives us a green ✓ (details below). 3. 
Let it run for some days to see that a variety of different PR types get to run on it (user fork branches, non-forked branches, branches originating from github.com UI direct file edit, various forced pushes, etc. - there are so many) while monitoring the experimental job's logs (not the overall job green as it's purposefully always green) 4. When it's clear that everything is solid, then merge the new changes into existing jobs. That way experiments on CI functionality itself won't interfere with the normal workflow. Now how can we make the job always succeed while the new CI feature is being developed? Some CIs, like TravisCI support ignore-step-failure and will report the overall job as successful, but CircleCI and Github Actions as of this writing don't support that. So the following workaround can be used: 1. `set +euo pipefail` at the beginning of the run command to suppress most potential failures in the bash script. 2. the last command must be a success: `echo "done"` or just `true` will do Here is an example: ```yaml - run: name: run CI experiment command: | set +euo pipefail echo "setting run-all-despite-any-errors-mode" this_command_will_fail echo "but bash continues to run" # emulate another failure false # but the last command must be a success echo "during experiment do not remove: reporting success to CI, even if there were failures" ``` For simple commands you could also do: ```bash cmd_that_may_fail || true ``` Of course, once satisfied with the results, integrate the experimental step or job with the rest of the normal jobs, while removing `set +euo pipefail` or any other things you may have added to ensure that the experimental job doesn't interfere with the normal CI functioning. This whole process would have been much easier if we only could set something like `allow-failure` for the experimental step, and let it fail without impacting the overall status of PRs. But as mentioned earlier CircleCI and Github Actions don't support it at the moment. You can vote for this feature and see where it is at these CI-specific threads: - [Github Actions:](https://github.com/actions/toolkit/issues/399) - [CircleCI:](https://ideas.circleci.com/ideas/CCI-I-344) ## DeepSpeed integration For a PR that involves the DeepSpeed integration, keep in mind our CircleCI PR CI setup doesn't have GPUs. Tests requiring GPUs are run on a different CI nightly. This means if you get a passing CI report in your PR, it doesn’t mean the DeepSpeed tests pass. To run DeepSpeed tests: ```bash RUN_SLOW=1 pytest tests/deepspeed/test_deepspeed.py ``` Any changes to the modeling or PyTorch examples code requires running the model zoo tests as well. ```bash RUN_SLOW=1 pytest tests/deepspeed ```
transformers/docs/source/en/testing.md/0
{ "file_path": "transformers/docs/source/en/testing.md", "repo_id": "transformers", "token_count": 13559 }
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Plantillas para Modelos de Chat ## Introducción Un caso de uso cada vez más común para LLMs es **el chat**. En un contexto de chat, en lugar de continuar una única cadena de texto (como es el caso con un modelo de lenguaje estándar), el modelo continúa una conversación que consta de uno o más **mensajes**, cada uno de los cuales incluye un **rol**, como "usuario" o "asistente", así como el texto del mensaje. Al igual que con la tokenización, diferentes modelos esperan formatos de entrada muy diferentes para el chat. Esta es la razón por la que agregamos las plantillas de chat como una característica. Las plantillas de chat son parte del tokenizador. Especifican cómo convertir conversaciones, representadas como listas de mensajes, en una única cadena tokenizable en el formato que el modelo espera. Vamos a hacer esto con un ejemplo concreto utilizando el modelo `BlenderBot`. BlenderBot tiene una plantilla predeterminada extremadamente simple, que principalmente solo agrega espacios en blanco entre rondas de diálogo: ```python >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill") >>> chat = [ ... {"role": "user", "content": "Hello, how are you?"}, ... {"role": "assistant", "content": "I'm doing great. How can I help you today?"}, ... {"role": "user", "content": "I'd like to show off how chat templating works!"}, ... ] >>> tokenizer.apply_chat_template(chat, tokenize=False) " Hello, how are you? I'm doing great. How can I help you today? I'd like to show off how chat templating works!</s>" ``` Observa cómo todo el chat se condensa en una sola cadena. Si usamos `tokenize=True`, que es la configuración predeterminada, esa cadena también será tokenizada para nosotros. Sin embargo, para ver una plantilla más compleja en acción, usemos el modelo `mistralai/Mistral-7B-Instruct-v0.1` ```python >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.1") >>> chat = [ ... {"role": "user", "content": "Hello, how are you?"}, ... {"role": "assistant", "content": "I'm doing great. How can I help you today?"}, ... {"role": "user", "content": "I'd like to show off how chat templating works!"}, ... ] >>> tokenizer.apply_chat_template(chat, tokenize=False) "<s>[INST] Hello, how are you? [/INST]I'm doing great. How can I help you today?</s> [INST] I'd like to show off how chat templating works! [/INST]" ``` Ten en cuenta que esta vez, el tokenizador ha añadido los tokens de control [INST] y [/INST] para indicar el inicio y el final de los mensajes de usuario (¡pero no de los mensajes del asistente!). Mistral-instruct fue entrenado con estos tokens, pero BlenderBot no lo fue. ## ¿Cómo uso las plantillas de chat? 
Como puedes ver en el ejemplo anterior, las plantillas de chat son fáciles de usar. Simplemente construye una lista de mensajes, con claves de `rol` y `contenido`, y luego pásala al método [`~PreTrainedTokenizer.apply_chat_template`]. Una vez que hagas eso, ¡obtendrás una salida lista para usar! Al utilizar plantillas de chat como entrada para la generación de modelos, también es una buena idea usar `add_generation_prompt=True` para agregar una [indicación de generación](#¿Qué-son-los-"generation-prompts"?). Aquí tienes un ejemplo de cómo preparar la entrada para `model.generate()` utilizando el modelo de asistente `Zephyr`: ```python from transformers import AutoModelForCausalLM, AutoTokenizer checkpoint = "HuggingFaceH4/zephyr-7b-beta" tokenizer = AutoTokenizer.from_pretrained(checkpoint) model = AutoModelForCausalLM.from_pretrained(checkpoint) # You may want to use bfloat16 and/or move to GPU here messages = [ { "role": "system", "content": "You are a friendly chatbot who always responds in the style of a pirate", }, {"role": "user", "content": "How many helicopters can a human eat in one sitting?"}, ] tokenized_chat = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt") print(tokenizer.decode(tokenized_chat[0])) ``` Esto generará una cadena en el formato de entrada que Zephyr espera. ```text <|system|> You are a friendly chatbot who always responds in the style of a pirate</s> <|user|> How many helicopters can a human eat in one sitting?</s> <|assistant|> ``` Ahora que nuestra entrada está formateada correctamente para Zephyr, podemos usar el modelo para generar una respuesta a la pregunta del usuario: ```python outputs = model.generate(tokenized_chat, max_new_tokens=128) print(tokenizer.decode(outputs[0])) ``` Esto producirá: ```text <|system|> You are a friendly chatbot who always responds in the style of a pirate</s> <|user|> How many helicopters can a human eat in one sitting?</s> <|assistant|> Matey, I'm afraid I must inform ye that humans cannot eat helicopters. Helicopters are not food, they are flying machines. Food is meant to be eaten, like a hearty plate o' grog, a savory bowl o' stew, or a delicious loaf o' bread. But helicopters, they be for transportin' and movin' around, not for eatin'. So, I'd say none, me hearties. None at all. ``` ¡Arr, al final resultó ser fácil! ## ¿Existe un pipeline automatizado para chats? Sí, lo hay! Nuestros canales de generación de texto admiten entradas de chat, cual facilita más facíl utilizar los modelos de chat. En el pasado, solíamos utilizar una clase dedicada "ConversationalPipeline", pero ahora ha quedado obsoleta y su funcionalidad se ha fusionado en [`TextGenerationPipeline`]. Este pipeline está diseñado para facilitar el uso de modelos de chat. Intentemos el ejemplo de `Zephyr` de nuevo, pero esta vez utilizando el pipeline: ```python from transformers import pipeline pipe = pipeline("conversational", "HuggingFaceH4/zephyr-7b-beta") messages = [ { "role": "system", "content": "You are a friendly chatbot who always responds in the style of a pirate", }, {"role": "user", "content": "How many helicopters can a human eat in one sitting?"}, ] print(pipe(messages, max_new_tokens=128)[0]['generated_text'][-1]) # Print the assistant's response ``` ```text {'role': 'assistant', 'content': "Matey, I'm afraid I must inform ye that humans cannot eat helicopters. Helicopters are not food, they are flying machines. 
Food is meant to be eaten, like a hearty plate o' grog, a savory bowl o' stew, or a delicious loaf o' bread. But helicopters, they be for transportin' and movin' around, not for eatin'. So, I'd say none, me hearties. None at all."} ``` La canalización se encargará de todos los detalles de la tokenización y de llamar a `apply_chat_template` por ti. Una vez que el modelo tenga una plantilla de chat, ¡todo lo que necesitas hacer es inicializar el pipeline y pasarle la lista de mensajes! # ¿Qué son los "generation prompts"? Puede que hayas notado que el método `apply_chat_template` tiene un argumento `add_generation_prompt`. Este argumento indica a la plantilla que agregue tokens que indiquen el inicio de una respuesta del bot. Por ejemplo, considera el siguiente chat: ```python messages = [ {"role": "user", "content": "Hi there!"}, {"role": "assistant", "content": "Nice to meet you!"}, {"role": "user", "content": "Can I ask a question?"} ] ``` Así es cómo se verá esto sin un "generation prompt", usando la plantilla ChatML que vimos en el ejemplo de Zephyr: ```python tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=False) """<|im_start|>user Hi there!<|im_end|> <|im_start|>assistant Nice to meet you!<|im_end|> <|im_start|>user Can I ask a question?<|im_end|> """ ``` Y así es como se ve **con** un "generation prompt": ```python tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) """<|im_start|>user Hi there!<|im_end|> <|im_start|>assistant Nice to meet you!<|im_end|> <|im_start|>user Can I ask a question?<|im_end|> <|im_start|>assistant """ ``` Ten en cuenta que esta vez, hemos agregado los tokens que indican el inicio de una respuesta del bot. Esto asegura que cuando el modelo genere texto, escribirá una respuesta del bot en lugar de hacer algo inesperado, como continuar el mensaje del usuario. Recuerda, los modelos de chat siguen siendo solo modelos de lenguaje: están entrenados para continuar texto, ¡y el chat es solo un tipo especial de texto para ellos! Necesitas guiarlos con los tokens de control apropiados para que sepan lo que se supone que deben estar haciendo. No todos los modelos requieren "generation prompts". Algunos modelos, como BlenderBot y LLaMA, no tienen ningún token especial antes de las respuestas del bot. En estos casos, el argumento `add_generation_prompt` no tendrá ningún efecto. El efecto exacto que tiene `add_generation_prompt` dependerá de la plantilla que se esté utilizando. ## ¿Puedo usar plantillas de chat en el entrenamiento? ¡Sí! Recomendamos que apliques la plantilla de chat como un paso de preprocesamiento para tu conjunto de datos. Después de esto, simplemente puedes continuar como cualquier otra tarea de entrenamiento de modelos de lenguaje. Durante el entrenamiento, generalmente deberías establecer `add_generation_prompt=False`, porque los tokens añadidos para solicitar una respuesta del asistente no serán útiles durante el entrenamiento. 
Veamos un ejemplo: ```python from transformers import AutoTokenizer from datasets import Dataset tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta") chat1 = [ {"role": "user", "content": "Which is bigger, the moon or the sun?"}, {"role": "assistant", "content": "The sun."} ] chat2 = [ {"role": "user", "content": "Which is bigger, a virus or a bacterium?"}, {"role": "assistant", "content": "A bacterium."} ] dataset = Dataset.from_dict({"chat": [chat1, chat2]}) dataset = dataset.map(lambda x: {"formatted_chat": tokenizer.apply_chat_template(x["chat"], tokenize=False, add_generation_prompt=False)}) print(dataset['formatted_chat'][0]) ``` Y obtenemos: ```text <|user|> Which is bigger, the moon or the sun?</s> <|assistant|> The sun.</s> ``` Desde aquí, simplemente continúa el entrenamiento como lo harías con una tarea estándar de modelado de lenguaje, utilizando la columna `formatted_chat`. ## Avanzado: ¿Cómo funcionan las plantillas de chat? La plantilla de chat para un modelo se almacena en el atributo `tokenizer.chat_template`. Si no se establece ninguna plantilla de chat, se utiliza en su lugar la plantilla predeterminada para esa clase de modelo. Echemos un vistazo a la plantilla para `BlenderBot`: ```python >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill") >>> tokenizer.chat_template "{% for message in messages %}{% if message['role'] == 'user' %}{{ ' ' }}{% endif %}{{ message['content'] }}{% if not loop.last %}{{ ' ' }}{% endif %}{% endfor %}{{ eos_token }}" ``` ¡Es un poco intimidante! Vamos a agregar algunas líneas nuevas y sangria para que sea más legible. Ten en cuenta que la primera línea nueva después de cada bloque, así como cualquier espacio en blanco anterior a un bloque, se ignoran de forma predeterminada, utilizando las banderas `trim_blocks` y `lstrip_blocks` de Jinja. Sin embargo, ¡ten cuidado! Aunque el espacio en blanco inicial en cada línea se elimina, los espacios entre bloques en la misma línea no. ¡Te recomendamos encarecidamente que verifiques que tu plantilla no esté imprimiendo espacios adicionales donde no debería estarlo! ``` {% for message in messages %} {% if message['role'] == 'user' %} {{ ' ' }} {% endif %} {{ message['content'] }} {% if not loop.last %} {{ ' ' }} {% endif %} {% endfor %} {{ eos_token }} ``` Si nunca has visto uno de estos antes, esto es una [plantilla de Jinja](https://jinja.palletsprojects.com/en/3.1.x/templates/). Jinja es un lenguaje de plantillas que te permite escribir código simple que genera texto. En muchos aspectos, el código y la sintaxis se asemejan a Python. En Python puro, esta plantilla se vería algo así: ```python for idx, message in enumerate(messages): if message['role'] == 'user': print(' ') print(message['content']) if not idx == len(messages) - 1: # Check for the last message in the conversation print(' ') print(eos_token) ``` Efectivamente, la plantilla hace tres cosas: 1. Para cada mensaje, si el mensaje es un mensaje de usuario, añade un espacio en blanco antes de él, de lo contrario no imprime nada. 2. Añade el contenido del mensaje. 3. Si el mensaje no es el último mensaje, añade dos espacios después de él. Después del último mensaje, imprime el token EOS. Esta es una plantilla bastante simple: no añade ningún token de control y no admite mensajes "del sistema", que son una forma común de dar al modelo directivas sobre cómo debe comportarse en la conversación posterior. 
¡Pero Jinja te brinda mucha flexibilidad para hacer esas cosas! Veamos una plantilla de Jinja que pueda formatear las entradas de manera similar a la forma en que LLaMA las formatea (nota que la plantilla real de LLaMA incluye el manejo de mensajes del sistema predeterminados y el manejo de mensajes del sistema ligeramente diferentes en general; ¡no uses esta en tu código real!) ``` {% for message in messages %} {% if message['role'] == 'user' %} {{ bos_token + '[INST] ' + message['content'] + ' [/INST]' }} {% elif message['role'] == 'system' %} {{ '<<SYS>>\\n' + message['content'] + '\\n<</SYS>>\\n\\n' }} {% elif message['role'] == 'assistant' %} {{ ' ' + message['content'] + ' ' + eos_token }} {% endif %} {% endfor %} ``` Si observas esto por un momento, puedas ver lo que esta plantilla está haciendo: añade tokens específicos basados en el "rol" de cada mensaje, que representa quién lo envió. Los mensajes de usuario, asistente y sistema son claramente distinguibles para el modelo debido a los tokens en los que están envueltos. ## Avanzado: Añadiendo y editando plantillas de chat ### ¿Cómo creo una plantilla de chat? Simple, solo escribe una plantilla de Jinja y establece `tokenizer.chat_template`. ¡Puede resultarte más fácil comenzar con una plantilla existente de otro modelo y simplemente editarla según tus necesidades! Por ejemplo, podríamos tomar la plantilla de LLaMA de arriba y añadir "[ASST]" y "[/ASST]" a los mensajes del asistente: ``` {% for message in messages %} {% if message['role'] == 'user' %} {{ bos_token + '[INST] ' + message['content'].strip() + ' [/INST]' }} {% elif message['role'] == 'system' %} {{ '<<SYS>>\\n' + message['content'].strip() + '\\n<</SYS>>\\n\\n' }} {% elif message['role'] == 'assistant' %} {{ '[ASST] ' + message['content'] + ' [/ASST]' + eos_token }} {% endif %} {% endfor %} ``` Ahora, simplemente establece el atributo `tokenizer.chat_template`. ¡La próxima vez que uses [`~PreTrainedTokenizer.apply_chat_template`], se utilizará tu nueva plantilla! Este atributo se guardará en el archivo tokenizer_config.json, por lo que puedes usar [`~utils.PushToHubMixin.push_to_hub`] para cargar tu nueva plantilla en el Hub y asegurarte de que todos estén utilizando la plantilla correcta para tu modelo. ```python template = tokenizer.chat_template template = template.replace("SYS", "SYSTEM") # Change the system token tokenizer.chat_template = template # Set the new template tokenizer.push_to_hub("model_name") # Upload your new template to the Hub! ``` El método [`~PreTrainedTokenizer.apply_chat_template`], que utiliza tu plantilla de chat, es llamado por la clase [`TextGenerationPipeline`], así que una vez que configures la plantilla de chat correcta, tu modelo se volverá automáticamente compatible con [`TextGenerationPipeline`]. <Tip> Si estás ajustando finamente un modelo para chat, además de establecer una plantilla de chat, probablemente deberías agregar cualquier nuevo token de control de chat como los tokens especiales en el tokenizador. Los tokens especiales nunca se dividen, asegurando que tus tokens de control siempre se manejen como tokens únicos en lugar de ser tokenizados en piezas. También deberías establecer el atributo `eos_token` del tokenizador con el token que marca el final de las generaciones del asistente en tu plantilla. Esto asegurará que las herramientas de generación de texto puedan determinar correctamente cuándo detener la generación de texto. </Tip> ### ¿Qué plantilla debería usar? 
Cuando establezcas la plantilla para un modelo que ya ha sido entrenado para chat, debes asegurarte de que la plantilla coincida exactamente con el formato de mensajes que el modelo vio durante el entrenamiento, o de lo contrario es probable que experimentes degradación del rendimiento. Esto es cierto incluso si estás entrenando aún más el modelo; probablemente obtendrás el mejor rendimiento si mantienes constantes los tokens de chat. Esto es muy análogo a la tokenización: generalmente obtienes el mejor rendimiento para la inferencia o el ajuste fino cuando coincides precisamente con la tokenización utilizada durante el entrenamiento. Si estás entrenando un modelo desde cero o ajustando finamente un modelo de lenguaje base para chat, por otro lado, ¡tienes mucha libertad para elegir una plantilla apropiada! Los LLM son lo suficientemente inteligentes como para aprender a manejar muchos formatos de entrada diferentes. Nuestra plantilla predeterminada para modelos que no tienen una plantilla específica de clase sigue el formato ChatML, y esta es una buena elección flexible para muchos casos de uso. Se ve así: ``` {% for message in messages %} {{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}} {% endfor %} ``` Si te gusta esta plantilla, aquí está en forma de una sola línea, lista para copiar en tu código. La versión de una sola línea también incluye un práctico soporte para [prompts de generación](#¿Qué-son-los-"generation-prompts"?), ¡pero ten en cuenta que no añade tokens de BOS o EOS! Si tu modelo espera esos tokens, no se agregarán automáticamente por `apply_chat_template`, en otras palabras, el texto será tokenizado con `add_special_tokens=False`. Esto es para evitar posibles conflictos entre la plantilla y la lógica de `add_special_tokens`. ¡Si tu modelo espera tokens especiales, asegúrate de añadirlos a la plantilla! ```python tokenizer.chat_template = "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}" ``` Esta plantilla envuelve cada mensaje en tokens `<|im_start|>` y `<|im_end|>`, y simplemente escribe el rol como una cadena, lo que permite flexibilidad en los roles con los que entrenas. La salida se ve así: ```text <|im_start|>system You are a helpful chatbot that will do its best not to say anything so stupid that people tweet about it.<|im_end|> <|im_start|>user How are you?<|im_end|> <|im_start|>assistant I'm doing great!<|im_end|> ``` Los roles "usuario", "sistema" y "asistente" son los estándar para chat, y recomendamos usarlos cuando tenga sentido, particularmente si deseas que tu modelo funcione bien con [`TextGenerationPipeline`]. Sin embargo, no estás limitado a estos roles: la plantilla es extremadamente flexible y cualquier cadena puede ser un rol. ### ¡Quiero añadir algunas plantillas de chat! ¿Cómo debo empezar? Si tienes algún modelo de chat, debes establecer su atributo `tokenizer.chat_template` y probarlo usando [`~PreTrainedTokenizer.apply_chat_template`], luego subir el tokenizador actualizado al Hub. 
Esto se aplica incluso si no eres el propietario del modelo: si estás usando un modelo con una plantilla de chat vacía o que todavía está utilizando la plantilla predeterminada de clase, por favor abre una solicitud de extracción [pull request](https://huggingface.co/docs/hub/repositories-pull-requests-discussions) al repositorio del modelo para que este atributo se pueda establecer correctamente. Una vez que se establece el atributo, ¡eso es todo, has terminado! `tokenizer.apply_chat_template` ahora funcionará correctamente para ese modelo, ¡lo que significa que también es compatible automáticamente en lugares como `TextGenerationPipeline`! Al asegurarnos de que los modelos tengan este atributo, podemos garantizar que toda la comunidad pueda utilizar todo el poder de los modelos de código abierto. Los desajustes de formato han estado acechando el campo y dañando silenciosamente el rendimiento durante demasiado tiempo: ¡es hora de ponerles fin! ## Avanzado: Consejos para escribir plantillas Si no estás familiarizado con Jinja, generalmente encontramos que la forma más fácil de escribir una plantilla de chat es primero escribir un script de Python corto que formatee los mensajes como desees, y luego convertir ese script en una plantilla. Recuerda que el manejador de plantillas recibirá el historial de conversación como una variable llamada mensajes. Cada mensaje es un diccionario con dos claves, `role` y `content`. Podrás acceder a los `mensajes` en tu plantilla tal como lo harías en Python, lo que significa que puedes recorrerlo con `{% for message in messages %}` o acceder a mensajes individuales con, por ejemplo, `{{ messages[0] }}`. También puedes usar los siguientes consejos para convertir tu código a Jinja: ### Bucles For Los bucles For en Jinja se ven así: ``` {% for message in messages %} {{ message['content'] }} {% endfor %} ``` Ten en cuenta que todo lo que esté dentro del {{bloque de expresión}} se imprimirá en la salida. Puedes usar operadores como `+` para combinar cadenas dentro de bloques de expresión. ### Declaraciones if Las declaraciones if en Jinja se ven así: ``` {% if message['role'] == 'user' %} {{ message['content'] }} {% endif %} ``` Observa cómo donde Python utiliza espacios en blanco para marcar el inicio y el final de los bloques `for` e `if`, Jinja requiere que los termines explícitamente con `{% endfor %}` y `{% endif %}`. ### Variables especiales Dentro de tu plantilla, tendrás acceso a la lista de `mensajes`, pero también puedes acceder a varias otras variables especiales. Estas incluyen tokens especiales como `bos_token` y `eos_token`, así como la variable `add_generation_prompt` que discutimos anteriormente. También puedes usar la variable `loop` para acceder a información sobre la iteración actual del bucle, por ejemplo, usando `{% if loop.last %}` para verificar si el mensaje actual es el último mensaje en la conversación. Aquí tienes un ejemplo que combina estas ideas para agregar un prompt de generación al final de la conversación si add_generation_prompt es `True`: ``` {% if loop.last and add_generation_prompt %} {{ bos_token + 'Assistant:\n' }} {% endif %} ``` ### Notas sobre los espacios en blanco Hemos intentado que Jinja ignore los espacios en blanco fuera de las {{expresiones}} tanto como sea posible. Sin embargo, ten en cuenta que Jinja es un motor de plantillas de propósito general y puede tratar el espacio en blanco entre bloques en la misma línea como significativo e imprimirlo en la salida. 
¡Te recomendamos **encarecidamente** que verifiques que tu plantilla no esté imprimiendo espacios adicionales donde no debería antes de subirla!
transformers/docs/source/es/chat_templating.md/0
{ "file_path": "transformers/docs/source/es/chat_templating.md", "repo_id": "transformers", "token_count": 8513 }
<!--⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Pipelines pour l'inférence L'objet [`pipeline`] rend simple l'utilisation de n'importe quel modèle du [Hub](https://huggingface.co/models) pour l'inférence sur n'importe quelle langue, tâches de vision par ordinateur, d'audio et multimodales. Même si vous n'avez pas d'expérience avec une modalité spécifique ou si vous n'êtes pas familier avec le code ci-dessous des modèles, vous pouvez toujours les utiliser pour l'inférence avec la [`pipeline`] ! Ce tutoriel vous apprendra à : * Utiliser un [`pipeline`] pour l'inférence. * Utiliser un tokenizer ou modèle spécifique. * Utiliser un [`pipeline`] pour des tâches audio, de vision et multimodales. <Tip> Consultez la documentation du [`pipeline`] pour une liste complète des tâches prises en charge et des paramètres disponibles. </Tip> ## Utilisation du pipeline Bien que chaque tâche ait son propre [`pipeline`], il est plus simple d'utiliser le [`pipeline`] générale qui inclut tous les pipelines spécifiques aux différentes tâches. Cette approche charge automatiquement un modèle par défaut et une classe de prétraitement adaptée à votre tâche, simplifiant ainsi votre utilisation. Prenons l'exemple de l'utilisation du [`pipeline`] pour la reconnaissance automatique de la parole (ASR) ou de la transcription de la parole en texte. 1. Commencez par créer un [`pipeline`] et spécifiez la tâche d'inférence : ```py >>> from transformers import pipeline >>> transcriber = pipeline(task="automatic-speech-recognition") ``` 2. Passez votre entrée au [`pipeline`]. Dans le cas de la reconnaissance vocale, il s'agit d'un fichier audio : ```py >>> transcriber("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac") {'text': 'I HAVE A DREAM BUT ONE DAY THIS NATION WILL RISE UP LIVE UP THE TRUE MEANING OF ITS TREES'} ``` Pas le résultat que vous aviez en tête ? Consultez certains des [modèles de reconnaissance vocale automatique les plus téléchargés](https://huggingface.co/models?pipeline_tag=automatic-speech-recognition&sort=trending) sur le Hub pour voir si vous pouvez obtenir une meilleure transcription. Essayons le modèle [Whisper large-v2](https://huggingface.co/openai/whisper-large) de OpenAI. Whisper a été publié 2 ans après Wav2Vec2 et a été entraîné sur près de 10 fois plus de données. En tant que tel, il surpasse Wav2Vec2 sur la plupart des benchmarks en aval. Il a également l'avantage supplémentaire de prédire la ponctuation et la casse, ce qui n'est pas possible avec Wav2Vec2. Essayons-le ici pour voir comment il fonctionne : ```py >>> transcriber = pipeline(model="openai/whisper-large-v2") >>> transcriber("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac") {'text': ' I have a dream that one day this nation will rise up and live out the true meaning of its creed.'} ``` Maintenant, ce résultat semble plus précis ! Pour une comparaison approfondie entre Wav2Vec2 et Whisper, consultez le [cours Audio Transformers](https://huggingface.co/learn/audio-course/chapter5/asr_models). Nous vous encourageons vraiment à consulter le Hub pour des modèles dans différentes langues, des modèles spécialisés dans votre domaine, et plus encore. Vous pouvez consulter et comparer les résultats des modèles directement depuis votre navigateur sur le Hub pour voir s'ils conviennent ou gèrent mieux les cas particuliers que d'autres. 
Et si vous ne trouvez pas de modèle pour votre cas d'utilisation, vous pouvez toujours commencer à [entraîner](training) le vôtre ! Si vous avez plusieurs entrées, vous pouvez passer votre entrée sous forme de liste : ```py transcriber( [ "https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac", "https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/1.flac", ] ) ``` Les pipelines sont excellents pour l'expérimentation car passer d'un modèle à un autre est trivial ; cependant, il existe des moyens de les optimiser pour des charges de travail plus importantes que l'expérimentation. Consultez les guides suivants qui expliquent comment itérer sur des ensembles de données complets ou utiliser des pipelines dans un serveur web : de la documentation : * [Utilisation des pipelines sur un ensemble de données](#using-pipelines-on-a-dataset) * [Utilisation des pipelines pour un serveur web](./pipeline_webserver) ## Paramètres [`pipeline`] prend en charge de nombreux paramètres ; certains sont spécifiques à la tâche et d'autres sont généraux pour tous les pipelines. En général, vous pouvez spécifier les paramètres où vous le souhaitez : ```py transcriber = pipeline(model="openai/whisper-large-v2", my_parameter=1) out = transcriber(...) # This will use `my_parameter=1`. out = transcriber(..., my_parameter=2) # This will override and use `my_parameter=2`. out = transcriber(...) # This will go back to using `my_parameter=1`. ``` Voyons 3 paramètres importants : ### Device Si vous utilisez `device=n`, le pipeline met automatiquement le modèle sur l'appareil spécifié. Cela fonctionnera que vous utilisiez PyTorch ou Tensorflow. ```py transcriber = pipeline(model="openai/whisper-large-v2", device=0) ``` Si le modèle est trop grand pour un seul GPU et que vous utilisez PyTorch, vous pouvez définir `device_map="auto"` pour déterminer automatiquement comment charger et stocker les poids du modèle. L'utilisation de l'argument `device_map` nécessite le package 🤗 [Accelerate](https://huggingface.co/docs/accelerate) : ```bash pip install --upgrade accelerate ``` Le code suivant charge et stocke automatiquement les poids du modèle sur plusieurs appareils : ```py transcriber = pipeline(model="openai/whisper-large-v2", device_map="auto") ``` Notez que si `device_map="auto"` est passé, il n'est pas nécessaire d'ajouter l'argument `device=device` lors de l'instanciation de votre `pipeline` car vous pourriez rencontrer des comportements inattendus ! ### Batch size Par défaut, les pipelines ne feront pas d'inférence en batch pour des raisons expliquées en détail [ici](https://huggingface.co/docs/transformers/main_classes/pipelines#pipeline-batching). La raison est que le batching n'est pas nécessairement plus rapide, et peut en fait être beaucoup plus lent dans certains cas. Mais si cela fonctionne dans votre cas d'utilisation, vous pouvez utiliser : ```py transcriber = pipeline(model="openai/whisper-large-v2", device=0, batch_size=2) audio_filenames = [f"https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/{i}.flac" for i in range(1, 5)] texts = transcriber(audio_filenames) ``` Cela exécute le pipeline sur les 4 fichiers audio fournis, mais les passera par batch de 2 au modèle (qui est sur un GPU, où le batching est plus susceptible d'aider) sans nécessiter de code supplémentaire de votre part. La sortie doit toujours correspondre à ce que vous auriez reçu sans batching. Il s'agit uniquement d'un moyen de vous aider à obtenir plus de vitesse avec un pipeline. 
Les pipelines peuvent également atténuer certaines des complexités du batching car, pour certains pipelines, un seul élément (comme un long fichier audio) doit être divisé en plusieurs parties pour être traité par un modèle. Le pipeline effectue ce [*batching par morceaux*](./main_classes/pipelines#pipeline-chunk-batching) pour vous. ### Paramètres spécifiques à la tâche Toutes les tâches fournissent des paramètres spécifiques à la tâche qui permettent une flexibilité et des options supplémentaires pour vous aider à accomplir votre travail. Par exemple, la méthode [`transformers.AutomaticSpeechRecognitionPipeline.__call__`] dispose d'un paramètre `return_timestamps` qui semble prometteur pour le sous-titrage des vidéos : ```py >>> transcriber = pipeline(model="openai/whisper-large-v2", return_timestamps=True) >>> transcriber("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac") {'text': ' I have a dream that one day this nation will rise up and live out the true meaning of its creed.', 'chunks': [{'timestamp': (0.0, 11.88), 'text': ' I have a dream that one day this nation will rise up and live out the true meaning of its'}, {'timestamp': (11.88, 12.38), 'text': ' creed.'}]} ``` Comme vous pouvez le voir, le modèle a inféré le texte et a également indiqué **quand** les différentes phrases ont été prononcées. Il existe de nombreux paramètres disponibles pour chaque tâche, alors consultez la référence API de chaque tâche pour voir ce que vous pouvez ajuster ! Par exemple, le [`~transformers.AutomaticSpeechRecognitionPipeline`] dispose d'un paramètre `chunk_length_s` qui est utile pour travailler sur des fichiers audio très longs (par exemple, le sous-titrage de films entiers ou de vidéos d'une heure) qu'un modèle ne peut généralement pas gérer seul : ```python >>> transcriber = pipeline(model="openai/whisper-large-v2", chunk_length_s=30) >>> transcriber("https://huggingface.co/datasets/reach-vb/random-audios/resolve/main/ted_60.wav") {'text': " So in college, I was a government major, which means I had to write a lot of papers. Now, when a normal student writes a paper, they might spread the work out a little like this. So, you know. You get started maybe a little slowly, but you get enough done in the first week that with some heavier days later on, everything gets done and things stay civil. And I would want to do that like that. That would be the plan. I would have it all ready to go, but then actually the paper would come along, and then I would kind of do this. And that would happen every single paper. But then came my 90-page senior thesis, a paper you're supposed to spend a year on. I knew for a paper like that, my normal workflow was not an option, it was way too big a project. So I planned things out and I decided I kind of had to go something like this. This is how the year would go. So I'd start off light and I'd bump it up"} ``` Si vous ne trouvez pas un paramètre qui vous aiderait vraiment, n'hésitez pas à [le demander](https://github.com/huggingface/transformers/issues/new?assignees=&labels=feature&template=feature-request.yml) ! ## Utilisation des pipelines sur un ensemble de données Le pipeline peut également exécuter des inférences sur un grand ensemble de données. 
Le moyen le plus simple que nous recommandons pour cela est d'utiliser un itérateur :

```py
def data():
    for i in range(1000):
        yield f"My example {i}"


pipe = pipeline(model="openai-community/gpt2", device=0)
generated_characters = 0
for out in pipe(data()):
    generated_characters += len(out[0]["generated_text"])
```

L'itérateur `data()` génère chaque résultat, et le pipeline reconnaît automatiquement que l'entrée est itérable : il commence à récupérer les données tout en continuant à les traiter sur le GPU (cela utilise [DataLoader](https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader) sous le capot). C'est important car vous n'avez pas besoin d'allouer de mémoire pour l'ensemble de données complet et vous pouvez alimenter le GPU aussi rapidement que possible.

Étant donné que le traitement par batch peut accélérer les choses, il peut être utile d'essayer de régler le paramètre `batch_size` ici.

La façon la plus simple d'itérer sur un ensemble de données est d'en charger un depuis 🤗 [Datasets](https://github.com/huggingface/datasets) :

```py
# KeyDataset is a util that will just output the item we're interested in.
from transformers.pipelines.pt_utils import KeyDataset
from datasets import load_dataset

pipe = pipeline(model="hf-internal-testing/tiny-random-wav2vec2", device=0)
dataset = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation[:10]")

for out in pipe(KeyDataset(dataset, "audio")):
    print(out)
```

## Utilisation des pipelines pour un serveur web

<Tip>
Créer un moteur d'inférence est un sujet complexe qui mérite sa propre page.
</Tip>

[Lien](./pipeline_webserver)

## Pipeline de vision

Utiliser un [`pipeline`] pour les tâches de vision est pratiquement identique.

Spécifiez votre tâche et passez votre image au classificateur. L'image peut être un lien, un chemin local ou une image encodée en base64. Par exemple, quelle espèce de chat est montrée ci-dessous ?

![pipeline-cat-chonk](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg)

```py
>>> from transformers import pipeline

>>> vision_classifier = pipeline(model="google/vit-base-patch16-224")
>>> preds = vision_classifier(
...     images="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg"
... )
>>> preds = [{"score": round(pred["score"], 4), "label": pred["label"]} for pred in preds]
>>> preds
[{'score': 0.4335, 'label': 'lynx, catamount'}, {'score': 0.0348, 'label': 'cougar, puma, catamount, mountain lion, painter, panther, Felis concolor'}, {'score': 0.0324, 'label': 'snow leopard, ounce, Panthera uncia'}, {'score': 0.0239, 'label': 'Egyptian cat'}, {'score': 0.0229, 'label': 'tiger cat'}]
```

## Pipeline de texte

Utiliser un [`pipeline`] pour les tâches de NLP est pratiquement identique.

```py
>>> from transformers import pipeline

>>> # This model is a `zero-shot-classification` model.
>>> # It will classify text, except you are free to choose any label you might imagine
>>> classifier = pipeline(model="facebook/bart-large-mnli")
>>> classifier(
...     "I have a problem with my iphone that needs to be resolved asap!!",
...     candidate_labels=["urgent", "not urgent", "phone", "tablet", "computer"],
... )
{'sequence': 'I have a problem with my iphone that needs to be resolved asap!!', 'labels': ['urgent', 'phone', 'computer', 'not urgent', 'tablet'], 'scores': [0.504, 0.479, 0.013, 0.003, 0.002]}
```

## Pipeline multimodal

Le [`pipeline`] prend en charge plus d'une modalité.
Par exemple, une tâche de réponse à des questions visuelles (VQA) combine texte et image. N'hésitez pas à utiliser n'importe quel lien d'image que vous aimez et une question que vous souhaitez poser à propos de l'image. L'image peut être une URL ou un chemin local vers l'image.

Par exemple, si vous utilisez cette [image de facture](https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png) :

```py
>>> from transformers import pipeline

>>> vqa = pipeline(model="impira/layoutlm-document-qa")
>>> output = vqa(
...     image="https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png",
...     question="What is the invoice number?",
... )
>>> output[0]["score"] = round(output[0]["score"], 3)
>>> output
[{'score': 0.425, 'answer': 'us-001', 'start': 16, 'end': 16}]
```

<Tip>

Pour exécuter l'exemple ci-dessus, vous devez avoir [`pytesseract`](https://pypi.org/project/pytesseract/) installé en plus de 🤗 Transformers :

```bash
sudo apt install -y tesseract-ocr
pip install pytesseract
```

</Tip>

## Utilisation de `pipeline` sur de grands modèles avec 🤗 `accelerate`

Vous pouvez facilement exécuter `pipeline` sur de grands modèles en utilisant 🤗 `accelerate` ! Assurez-vous d'abord d'avoir installé `accelerate` avec `pip install accelerate`.

Chargez d'abord votre modèle en utilisant `device_map="auto"` ! Nous utiliserons `facebook/opt-1.3b` pour notre exemple.

```py
# pip install accelerate
import torch
from transformers import pipeline

pipe = pipeline(model="facebook/opt-1.3b", torch_dtype=torch.bfloat16, device_map="auto")
output = pipe("This is a cool example!", do_sample=True, top_p=0.95)
```

Vous pouvez également passer des modèles chargés en 8 bits si vous installez `bitsandbytes` et ajoutez l'argument `load_in_8bit=True`. Notez que vous pouvez remplacer le point de contrôle par n'importe quel modèle Hugging Face prenant en charge le chargement de grands modèles, comme BLOOM.

```py
# pip install accelerate bitsandbytes
import torch
from transformers import pipeline

pipe = pipeline(model="facebook/opt-1.3b", device_map="auto", model_kwargs={"load_in_8bit": True})
output = pipe("This is a cool example!", do_sample=True, top_p=0.95)
```

## Création de démonstrations web à partir de pipelines avec `gradio`

Les pipelines sont automatiquement pris en charge dans [Gradio](https://github.com/gradio-app/gradio/), une bibliothèque qui facilite la création d'applications d'apprentissage automatique belles et conviviales sur le web.

Tout d'abord, assurez-vous que Gradio est installé :

```
pip install gradio
```

Ensuite, vous pouvez créer une démonstration web autour d'un pipeline de classification d'images (ou tout autre pipeline) en une seule ligne de code en appelant la fonction [`Interface.from_pipeline`](https://www.gradio.app/docs/interface#interface-from-pipeline) de Gradio pour lancer le pipeline. Cela crée une interface intuitive de glisser-déposer dans votre navigateur :

```py
from transformers import pipeline
import gradio as gr

pipe = pipeline("image-classification", model="google/vit-base-patch16-224")

gr.Interface.from_pipeline(pipe).launch()
```

![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/panda-classification.png)

Par défaut, la démonstration web s'exécute sur un serveur local. Si vous souhaitez la partager avec d'autres, vous pouvez générer un lien public temporaire en définissant `share=True` dans `launch()`.
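Par exemple (esquisse hypothétique qui réutilise le pipeline `pipe` et l'import `gr` définis ci-dessus) :

```py
# Exemple hypothétique : publier la démo via un lien public temporaire.
# `share=True` crée un tunnel hébergé par Gradio, valable pendant une durée limitée.
gr.Interface.from_pipeline(pipe).launch(share=True)
```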
Vous pouvez également héberger votre démonstration sur [Hugging Face Spaces](https://huggingface.co/spaces) pour obtenir un lien permanent.
transformers/docs/source/fr/tutoriel_pipeline.md/0
{ "file_path": "transformers/docs/source/fr/tutoriel_pipeline.md", "repo_id": "transformers", "token_count": 6226 }
<!--Copyright 2021 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Debugging

## Debug dei problemi di rete multi-GPU

Quando addestri o fai inferenza con `DistributedDataParallel` e GPU multiple, se si verificano problemi di intercomunicazione tra processi e/o nodi, puoi utilizzare il seguente script per diagnosticare i problemi della rete.

```bash
wget https://raw.githubusercontent.com/huggingface/transformers/main/scripts/distributed/torch-distributed-gpu-test.py
```

Per esempio, per testare come 2 GPU interagiscono, esegui:

```bash
python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
```

Se entrambi i processi sono in grado di comunicare tra loro e di allocare la memoria della GPU, ciascuno di essi stamperà lo stato OK.

Per più GPU o nodi adatta gli argomenti nello script.

All'interno dello script di diagnostica troverai molti altri dettagli e anche una guida per eseguirlo in ambiente SLURM.

Per un livello di debug superiore, aggiungi la variabile d'ambiente `NCCL_DEBUG=INFO` come di seguito:

```bash
NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
```

In questo modo si scaricano molte informazioni di debug relative a NCCL, che puoi cercare online in caso di problemi. Oppure, se non sei sicuro di come interpretare l'output, puoi condividere il file di log in una Issue.

## Rilevamento di Underflow e Overflow

<Tip>

Questa funzionalità al momento è disponibile solo per PyTorch.

</Tip>

<Tip>

Per l'addestramento multi-GPU richiede DDP (`torch.distributed.launch`).

</Tip>

<Tip>

Questa funzionalità può essere usata con modelli basati su `nn.Module`.

</Tip>

Se inizi a ottenere `loss=NaN` o il modello presenta qualche altro comportamento anomalo a causa di valori `inf` o `nan` nelle attivazioni o nei pesi, è necessario scoprire dove si verifica il primo underflow o overflow e cosa lo ha determinato. Fortunatamente è possibile farlo facilmente attivando un modulo speciale che effettuerà il rilevamento automaticamente.

Se stai usando [`Trainer`], hai bisogno solo di aggiungere:

```bash
--debug underflow_overflow
```

ai normali argomenti della riga di comando, oppure passare `debug="underflow_overflow"` quando viene creato l'oggetto [`TrainingArguments`].

Se stai usando il tuo ciclo di allenamento o un altro trainer, puoi ottenere lo stesso risultato con:

```python
from transformers.debug_utils import DebugUnderflowOverflow

debug_overflow = DebugUnderflowOverflow(model)
```

[`~debug_utils.DebugUnderflowOverflow`] inserisce dei ganci (hook) nel modello che, dopo ogni chiamata, testeranno le variabili di ingresso e di uscita e anche i pesi del modulo corrispondente.
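A titolo puramente illustrativo, ecco uno schizzo ipotetico di dove collocare il debugger rispetto a un ciclo di addestramento personalizzato (i nomi `model`, `dataloader` e `optimizer` sono segnaposto, non fanno parte della guida originale):

```python
# Schizzo ipotetico: istanziare il debugger UNA sola volta, prima del ciclo di addestramento.
from transformers.debug_utils import DebugUnderflowOverflow

debug_overflow = DebugUnderflowOverflow(model)  # registra i ganci su tutti i sottomoduli

for batch in dataloader:
    outputs = model(**batch)       # i ganci ispezionano input/output e pesi di ogni modulo
    outputs.loss.backward()
    optimizer.step()
    optimizer.zero_grad()
```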
Non appena viene rilevato `inf` o `nan` in almeno un elemento delle attivazioni o dei pesi, il programma lo notifica e stampa un rapporto come il seguente (questo è stato rilevato con `google/mt5-small` sotto fp16 mixed precision):

```
Detected inf/nan during batch_number=0
Last 21 forward frames:
abs min  abs max  metadata
                  encoder.block.1.layer.1.DenseReluDense.dropout Dropout
0.00e+00 2.57e+02 input[0]
0.00e+00 2.85e+02 output
[...]
                  encoder.block.2.layer.0 T5LayerSelfAttention
6.78e-04 3.15e+03 input[0]
2.65e-04 3.42e+03 output[0]
             None output[1]
2.25e-01 1.00e+04 output[2]
                  encoder.block.2.layer.1.layer_norm T5LayerNorm
8.69e-02 4.18e-01 weight
2.65e-04 3.42e+03 input[0]
1.79e-06 4.65e+00 output
                  encoder.block.2.layer.1.DenseReluDense.wi_0 Linear
2.17e-07 4.50e+00 weight
1.79e-06 4.65e+00 input[0]
2.68e-06 3.70e+01 output
                  encoder.block.2.layer.1.DenseReluDense.wi_1 Linear
8.08e-07 2.66e+01 weight
1.79e-06 4.65e+00 input[0]
1.27e-04 2.37e+02 output
                  encoder.block.2.layer.1.DenseReluDense.dropout Dropout
0.00e+00 8.76e+03 input[0]
0.00e+00 9.74e+03 output
                  encoder.block.2.layer.1.DenseReluDense.wo Linear
1.01e-06 6.44e+00 weight
0.00e+00 9.74e+03 input[0]
3.18e-04 6.27e+04 output
                  encoder.block.2.layer.1.DenseReluDense T5DenseGatedGeluDense
1.79e-06 4.65e+00 input[0]
3.18e-04 6.27e+04 output
                  encoder.block.2.layer.1.dropout Dropout
3.18e-04 6.27e+04 input[0]
0.00e+00      inf output
```

L'output di esempio è stato tagliato al centro per brevità.

La seconda colonna mostra il valore dell'elemento più grande in valore assoluto: così, se osserviamo da vicino gli ultimi frame, input e output erano nel range di `1e4`. Questo addestramento è stato eseguito con mixed precision fp16 e l'ultimissimo passo è andato in overflow (sotto `fp16` il valore più grande prima di `inf` è `64e3`). Per evitare overflow sotto `fp16`, le attivazioni devono rimanere molto al di sotto di `1e4`, perché `1e4 * 1e4 = 1e8`, quindi qualsiasi moltiplicazione di matrici con attivazioni grandi porterà a una condizione di overflow numerico.

All'inizio della traccia è possibile scoprire in quale lotto si è verificato il problema (qui `Detected inf/nan during batch_number=0` significa che il problema si è verificato nel primo lotto).

Ogni frame segnalato inizia dichiarando il nome completamente qualificato del modulo per il quale il frame è stato segnalato. Se osserviamo il seguente frame:

```
                  encoder.block.2.layer.1.layer_norm T5LayerNorm
8.69e-02 4.18e-01 weight
2.65e-04 3.42e+03 input[0]
1.79e-06 4.65e+00 output
```

Qui `encoder.block.2.layer.1.layer_norm` indica che si tratta della layer norm del primo layer del secondo blocco dell'encoder, e che la chiamata specifica di `forward` è `T5LayerNorm`.

Osserviamo gli ultimi frame del report:

```
Detected inf/nan during batch_number=0
Last 21 forward frames:
abs min  abs max  metadata
[...]
                  encoder.block.2.layer.1.DenseReluDense.wi_0 Linear
2.17e-07 4.50e+00 weight
1.79e-06 4.65e+00 input[0]
2.68e-06 3.70e+01 output
                  encoder.block.2.layer.1.DenseReluDense.wi_1 Linear
8.08e-07 2.66e+01 weight
1.79e-06 4.65e+00 input[0]
1.27e-04 2.37e+02 output
                  encoder.block.2.layer.1.DenseReluDense.wo Linear
1.01e-06 6.44e+00 weight
0.00e+00 9.74e+03 input[0]
3.18e-04 6.27e+04 output
                  encoder.block.2.layer.1.DenseReluDense T5DenseGatedGeluDense
1.79e-06 4.65e+00 input[0]
3.18e-04 6.27e+04 output
                  encoder.block.2.layer.1.dropout Dropout
3.18e-04 6.27e+04 input[0]
0.00e+00      inf output
```

L'ultimo frame riporta la funzione `Dropout.forward`, con la prima voce per l'unico input e la seconda per l'unico output. Si può notare che è stata richiamata da un attributo `dropout` all'interno della classe `DenseReluDense`. Si può notare che ciò è avvenuto durante il primo layer del 2° blocco, durante il primissimo lotto. Infine, gli elementi di input più grandi in valore assoluto erano `6.27e+04` e l'equivalente per l'output era `inf`.

Puoi vedere qui che `T5DenseGatedGeluDense.forward` produce attivazioni in uscita il cui valore massimo assoluto era circa 62,7K, molto vicino al limite massimo di 64K di fp16. Nel frame successivo abbiamo `Dropout`, che rinormalizza i pesi dopo aver azzerato alcuni elementi, il che spinge il valore massimo assoluto oltre 64K e provoca un overflow (`inf`).

Come puoi notare, sono i frame precedenti che occorre esaminare, cioè quelli in cui i numeri iniziano a diventare molto grandi per i valori fp16.

Confrontiamo il report con il codice di `models/t5/modeling_t5.py`:

```python
class T5DenseGatedGeluDense(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.wi_0 = nn.Linear(config.d_model, config.d_ff, bias=False)
        self.wi_1 = nn.Linear(config.d_model, config.d_ff, bias=False)
        self.wo = nn.Linear(config.d_ff, config.d_model, bias=False)
        self.dropout = nn.Dropout(config.dropout_rate)
        self.gelu_act = ACT2FN["gelu_new"]

    def forward(self, hidden_states):
        hidden_gelu = self.gelu_act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states
```

Ora è facile individuare la chiamata a `dropout`, così come tutte le chiamate precedenti.

Poiché il rilevamento avviene in un hook di avanzamento (*forward hook*), i rapporti vengono creati immediatamente dopo il ritorno di ogni `forward`.

Tornando al rapporto completo, per agire e risolvere il problema dobbiamo andare qualche frame più in alto, dove i numeri hanno iniziato a salire, e probabilmente passare qui alla modalità `fp32`, in modo che i numeri non vadano in overflow quando vengono moltiplicati o sommati. Naturalmente, potrebbero esserci altre soluzioni.
Per esempio, potremmo disattivare temporaneamente `amp`, se è abilitato, spostando `forward` in un helper wrapper, come:

```python
def _forward(self, hidden_states):
    hidden_gelu = self.gelu_act(self.wi_0(hidden_states))
    hidden_linear = self.wi_1(hidden_states)
    hidden_states = hidden_gelu * hidden_linear
    hidden_states = self.dropout(hidden_states)
    hidden_states = self.wo(hidden_states)
    return hidden_states


import torch


def forward(self, hidden_states):
    if torch.is_autocast_enabled():
        with torch.cuda.amp.autocast(enabled=False):
            return self._forward(hidden_states)
    else:
        return self._forward(hidden_states)
```

Poiché il rilevatore automatico riporta solo gli ingressi e le uscite dei frame completi, una volta che si sa dove cercare, si possono analizzare anche le fasi intermedie di una specifica funzione `forward`. In tal caso puoi usare la funzione di supporto `detect_overflow` per indirizzare il rilevatore dove preferisci, ad esempio:

```python
from transformers.debug_utils import detect_overflow


class T5LayerFF(nn.Module):
    [...]

    def forward(self, hidden_states):
        forwarded_states = self.layer_norm(hidden_states)
        detect_overflow(forwarded_states, "after layer_norm")
        forwarded_states = self.DenseReluDense(forwarded_states)
        detect_overflow(forwarded_states, "after DenseReluDense")
        return hidden_states + self.dropout(forwarded_states)
```

Si può vedere che abbiamo aggiunto 2 di queste chiamate e ora teniamo traccia se da qualche parte è stato rilevato `inf` o `nan` per `forwarded_states`.

In realtà, il rilevatore li riporta già, perché ciascuna delle chiamate nell'esempio precedente è un `nn.Module`; ma se avessimo dei calcoli diretti locali, questo è il modo in cui lo faremmo.

Inoltre, se si istanzia il debugger nel proprio codice, è possibile modificare il numero di frame stampati rispetto al valore predefinito, ad esempio:

```python
from transformers.debug_utils import DebugUnderflowOverflow

debug_overflow = DebugUnderflowOverflow(model, max_frames_to_save=100)
```

### Tracciamento del minimo e del massimo assoluto di batch specifici

La stessa classe di debug può essere utilizzata per il tracciamento per-batch con la funzione di rilevamento di underflow/overflow disattivata.

Supponiamo di voler osservare i valori minimi e massimi assoluti per tutti gli ingredienti di ogni chiamata `forward` di un dato lotto, e di volerlo fare solo per i lotti 1 e 3. Si istanzia questa classe come:

```python
debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1, 3])
```

Ora i batch completi 1 e 3 saranno tracciati utilizzando lo stesso formato del rilevatore di underflow/overflow. I batch sono 0-indexed.

Questo è utile se si sa che il programma inizia a comportarsi male dopo un certo numero di batch, in modo da poter avanzare velocemente direttamente fino a quell'area. Ecco un esempio di output troncato per questa configurazione:

```
                  *** Starting batch number=1 ***
abs min  abs max  metadata
                  shared Embedding
1.01e-06 7.92e+02 weight
0.00e+00 2.47e+04 input[0]
5.36e-05 7.92e+02 output
[...]
                  decoder.dropout Dropout
1.60e-07 2.27e+01 input[0]
0.00e+00 2.52e+01 output
                  decoder T5Stack
                  not a tensor output
                  lm_head Linear
1.01e-06 7.92e+02 weight
0.00e+00 1.11e+00 input[0]
6.06e-02 8.39e+01 output
                   T5ForConditionalGeneration
                  not a tensor output

                  *** Starting batch number=3 ***
abs min  abs max  metadata
                  shared Embedding
1.01e-06 7.92e+02 weight
0.00e+00 2.78e+04 input[0]
5.36e-05 7.92e+02 output
[...]
```

Qui verrà scaricato un numero enorme di frame, tanti quante sono le chiamate forward nel modello, quindi potrebbe essere o meno ciò che serve; a volte, però, può essere più utile di un classico debugger. Per esempio, se il problema inizia a verificarsi a partire dal lotto numero 150, è possibile scaricare le tracce dei lotti 149 e 150 e confrontare i punti in cui i numeri hanno iniziato a divergere.

È inoltre possibile specificare il numero di batch dopo il quale interrompere l'addestramento, con:

```python
debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1, 3], abort_after_batch_num=3)
```
transformers/docs/source/it/debugging.md/0
{ "file_path": "transformers/docs/source/it/debugging.md", "repo_id": "transformers", "token_count": 5635 }
<!--- Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Controlli su una Pull Request Quando apri una pull request sui 🤗 Transformers, vengono eseguiti un discreto numero di controlli per assicurarsi che la patch che stai aggiungendo non stia rompendo qualcosa di esistente. Questi controlli sono di quattro tipi: - test regolari - costruzione della documentazione - stile del codice e della documentazione - coerenza generale del repository In questo documento, cercheremo di spiegare quali sono i vari controlli e le loro ragioni, oltre a spiegare come eseguire il debug locale se uno di essi fallisce sulla tua PR. Nota che tutti richiedono un'installazione dev: ```bash pip install transformers[dev] ``` o un'installazione modificabile: ```bash pip install -e .[dev] ``` all'interno del repo Transformers. ## Tests Tutti i job che iniziano con `ci/circleci: run_tests_` eseguono parti della suite di test dei Transformers. Ognuno di questi job si concentra su una parte della libreria in un determinato ambiente: per esempio `ci/circleci: run_tests_pipelines_tf` esegue il test delle pipeline in un ambiente in cui è installato solo TensorFlow. Nota che per evitare di eseguire i test quando non ci sono cambiamenti reali nei moduli che si stanno testando, ogni volta viene eseguita solo una parte della suite di test: viene eseguita una utility per determinare le differenze nella libreria tra prima e dopo la PR (ciò che GitHub mostra nella scheda "Files changes") e sceglie i test che sono stati impattati dalla diff. Questa utility può essere eseguita localmente con: ```bash python utils/tests_fetcher.py ``` dalla root del repo Transformers. Di seguito ciò che farà: 1. Controlla per ogni file nel diff se le modifiche sono nel codice o solo nei commenti o nelle docstrings. Vengono mantenuti solo i file con modifiche reali al codice. 2. Costruisce una mappa interna che fornisce per ogni file del codice sorgente della libreria tutti i file su cui ha un impatto ricorsivo. Si dice che il modulo A ha un impatto sul modulo B se il modulo B importa il modulo A. Per l'impatto ricorsivo, abbiamo bisogno di una catena di moduli che va dal modulo A al modulo B in cui ogni modulo importa il precedente. 3. Applica questa mappa ai file raccolti nel passaggio 1, si ottiene l'elenco dei file del modello interessati dalla PR. 4. Mappa ciascuno di questi file con i corrispondenti file di test e ottiene l'elenco dei test da eseguire. Quando esegui lo script in locale, dovresti ottenere la stampa dei risultati dei passi 1, 3 e 4 e quindi sapere quali test sono stati eseguiti. 
Lo script creerà anche un file chiamato `test_list.txt` che contiene l'elenco dei test da eseguire e che puoi eseguire localmente con il seguente comando: ```bash python -m pytest -n 8 --dist=loadfile -rA -s $(cat test_list.txt) ``` Nel caso in cui qualcosa sia sfuggito, l'intera suite di test viene eseguita quotidianamente. ## Build della documentazione Il job `ci/circleci: build_doc` esegue una build della documentazione per assicurarsi che tutto sia a posto una volta che la PR è stata unita. Se questo passaggio fallisce, puoi controllare localmente entrando nella cartella `docs` del repo Transformers e digitare ```bash make html ``` Sphinx non è noto per i suoi messaggi di errore chiari, quindi potrebbe essere necessario che provi alcune cose per trovare davvero la fonte dell'errore. ## Stile del codice e della documentazione La formattazione del codice viene applicata a tutti i file sorgenti, agli esempi e ai test usando `black` e `isort`. Abbiamo anche uno strumento personalizzato che si occupa della formattazione delle docstring e dei file `rst` (`utils/style_doc.py`), così come dell'ordine dei lazy imports eseguiti nei file `__init__.py` dei Transformers (`utils/custom_init_isort.py`). Tutto questo può essere lanciato eseguendo ```bash make style ``` I controlli della CI sono applicati all'interno del controllo `ci/circleci: check_code_quality`. Esegue anche `flake8`, che dà un'occhiata di base al codice e si lamenta se trova una variabile non definita o non utilizzata. Per eseguire questo controllo localmente, usare ```bash make quality ``` Questa operazione può richiedere molto tempo, quindi per eseguire la stessa operazione solo sui file modificati nel branch corrente, eseguire ```bash make fixup ``` Quest'ultimo comando eseguirà anche tutti i controlli aggiuntivi per la consistenza del repository. Diamogli un'occhiata. ## Coerenza del repository All'interno sono raggruppati tutti i test per assicurarsi che la tua PR lasci il repository in un buono stato ed è eseguito dal controllo `ci/circleci: check_repository_consistency`. 
Puoi eseguire localmente questo controllo eseguendo quanto segue: ```bash make repo-consistency ``` Questo verifica che: - Tutti gli oggetti aggiunti all'init sono documentati (eseguito da `utils/check_repo.py`) - Tutti i file `__init__.py` hanno lo stesso contenuto nelle loro due sezioni (eseguito da `utils/check_inits.py`) - Tutto il codice identificato come copia da un altro modulo è coerente con l'originale (eseguito da `utils/check_copies.py`) - Le traduzioni dei README e l'indice della documentazione hanno lo stesso elenco di modelli del README principale (eseguito da `utils/check_copies.py`) - Le tabelle autogenerate nella documentazione sono aggiornate (eseguito da `utils/check_table.py`) - La libreria ha tutti gli oggetti disponibili anche se non tutte le dipendenze opzionali sono installate (eseguito da `utils/check_dummies.py`) Se questo controllo fallisce, le prime due voci richiedono una correzione manuale, mentre le ultime quattro possono essere corrette automaticamente per te eseguendo il comando ```bash make fix-copies ``` Ulteriori controlli riguardano le PR che aggiungono nuovi modelli, principalmente che: - Tutti i modelli aggiunti sono in un Auto-mapping (eseguita da `utils/check_repo.py`) <!-- TODO Sylvain, add a check that makes sure the common tests are implemented.--> - Tutti i modelli sono testati correttamente (eseguito da `utils/check_repo.py`) <!-- TODO Sylvain, add the following - All models are added to the main README, inside the main doc - All checkpoints used actually exist on the Hub -->
transformers/docs/source/it/pr_checks.md/0
{ "file_path": "transformers/docs/source/it/pr_checks.md", "repo_id": "transformers", "token_count": 2370 }
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Sharing custom models 🤗 Transformersライブラリは、簡単に拡張できるように設計されています。すべてのモデルはリポジトリの特定のサブフォルダに完全にコード化されており、抽象化はありません。したがって、モデリングファイルをコピーして調整することが簡単です。 新しいモデルを書いている場合、ゼロから始める方が簡単かもしれません。このチュートリアルでは、カスタムモデルとその設定をどのように書き、Transformers内で使用できるようにし、コードに依存する共同体と共有する方法を説明します。ライブラリに存在しない場合でも、誰でも使用できるようにします。 これを実証するために、[timmライブラリ](https://github.com/rwightman/pytorch-image-models)のResNetクラスを[`PreTrainedModel`]にラップすることによって、ResNetモデルを使用します。 ## Writing a custom configuration モデルに取り組む前に、まずその設定を書きましょう。モデルの設定は、モデルを構築するために必要なすべての情報を含むオブジェクトです。次のセクションで見るように、モデルは初期化するために`config`しか受け取ることができないため、そのオブジェクトができるだけ完全である必要があります。 この例では、ResNetクラスのいくつかの引数を取得し、調整したいかもしれないとします。異なる設定は、異なるタイプのResNetを提供します。その後、これらの引数を確認した後、それらの引数を単に格納します。 ```python from transformers import PretrainedConfig from typing import List class ResnetConfig(PretrainedConfig): model_type = "resnet" def __init__( self, block_type="bottleneck", layers: List[int] = [3, 4, 6, 3], num_classes: int = 1000, input_channels: int = 3, cardinality: int = 1, base_width: int = 64, stem_width: int = 64, stem_type: str = "", avg_down: bool = False, **kwargs, ): if block_type not in ["basic", "bottleneck"]: raise ValueError(f"`block_type` must be 'basic' or bottleneck', got {block_type}.") if stem_type not in ["", "deep", "deep-tiered"]: raise ValueError(f"`stem_type` must be '', 'deep' or 'deep-tiered', got {stem_type}.") self.block_type = block_type self.layers = layers self.num_classes = num_classes self.input_channels = input_channels self.cardinality = cardinality self.base_width = base_width self.stem_width = stem_width self.stem_type = stem_type self.avg_down = avg_down super().__init__(**kwargs) ``` 重要なことを3つ覚えておくべきポイントは次のとおりです: - `PretrainedConfig` を継承する必要があります。 - あなたの `PretrainedConfig` の `__init__` は任意の kwargs を受け入れる必要があります。 - これらの `kwargs` は親クラスの `__init__` に渡す必要があります。 継承は、🤗 Transformers ライブラリのすべての機能を取得できるようにするためです。他の2つの制約は、 `PretrainedConfig` が設定しているフィールド以外にも多くのフィールドを持っていることから来ています。 `from_pretrained` メソッドで設定を再ロードする場合、これらのフィールドはあなたの設定に受け入れられ、 その後、親クラスに送信される必要があります。 設定の `model_type` を定義すること(ここでは `model_type="resnet"`)は、 自動クラスにモデルを登録したい場合を除いては必須ではありません(最後のセクションを参照)。 これで、ライブラリの他のモデル設定と同様に、設定を簡単に作成して保存できます。 以下は、resnet50d 設定を作成して保存する方法の例です: ```py resnet50d_config = ResnetConfig(block_type="bottleneck", stem_width=32, stem_type="deep", avg_down=True) resnet50d_config.save_pretrained("custom-resnet") ``` これにより、`custom-resnet` フォルダ内に `config.json` という名前のファイルが保存されます。その後、`from_pretrained` メソッドを使用して構成を再ロードできます。 ```py resnet50d_config = ResnetConfig.from_pretrained("custom-resnet") ``` また、[`PretrainedConfig`] クラスの他のメソッドを使用することもできます。たとえば、[`~PretrainedConfig.push_to_hub`] を使用して、設定を直接 Hub にアップロードできます。 ## Writing a custom model ResNet の設定ができたので、モデルを書き始めることができます。実際には2つのモデルを書きます。1つはバッチの画像から隠れた特徴を抽出するモデル([`BertModel`] 
のようなもの)で、もう1つは画像分類に適したモデル([`BertForSequenceClassification`] のようなもの)です。 前述したように、この例をシンプルに保つために、モデルの緩いラッパーのみを書きます。このクラスを書く前に行う必要がある唯一のことは、ブロックタイプと実際のブロッククラスの間のマップです。その後、すべてを `ResNet` クラスに渡して設定からモデルを定義します: ```py from transformers import PreTrainedModel from timm.models.resnet import BasicBlock, Bottleneck, ResNet from .configuration_resnet import ResnetConfig BLOCK_MAPPING = {"basic": BasicBlock, "bottleneck": Bottleneck} class ResnetModel(PreTrainedModel): config_class = ResnetConfig def __init__(self, config): super().__init__(config) block_layer = BLOCK_MAPPING[config.block_type] self.model = ResNet( block_layer, config.layers, num_classes=config.num_classes, in_chans=config.input_channels, cardinality=config.cardinality, base_width=config.base_width, stem_width=config.stem_width, stem_type=config.stem_type, avg_down=config.avg_down, ) def forward(self, tensor): return self.model.forward_features(tensor) ``` 画像を分類するモデルの場合、forwardメソッドを変更するだけです: ```py import torch class ResnetModelForImageClassification(PreTrainedModel): config_class = ResnetConfig def __init__(self, config): super().__init__(config) block_layer = BLOCK_MAPPING[config.block_type] self.model = ResNet( block_layer, config.layers, num_classes=config.num_classes, in_chans=config.input_channels, cardinality=config.cardinality, base_width=config.base_width, stem_width=config.stem_width, stem_type=config.stem_type, avg_down=config.avg_down, ) def forward(self, tensor, labels=None): logits = self.model(tensor) if labels is not None: loss = torch.nn.functional.cross_entropy(logits, labels) return {"loss": loss, "logits": logits} return {"logits": logits} ``` 両方の場合、`PreTrainedModel`から継承し、`config`を使用してスーパークラスの初期化を呼び出します(通常の`torch.nn.Module`を書くときのような感じです)。 `config_class`を設定する行は必須ではありませんが、(最後のセクションを参照)、モデルを自動クラスに登録したい場合に使用できます。 <Tip> モデルがライブラリ内のモデルと非常に似ている場合、このモデルと同じ構成を再利用できます。 </Tip> モデルが返す内容は何でも構いませんが、ラベルが渡されるときに損失を含む辞書を返す(`ResnetModelForImageClassification`のように行ったもの)と、 モデルを[`Trainer`]クラス内で直接使用できるようになります。独自のトレーニングループまたは他のライブラリを使用する予定である限り、 別の出力形式を使用することも問題ありません。 さて、モデルクラスができたので、1つ作成しましょう: ```py resnet50d = ResnetModelForImageClassification(resnet50d_config) ``` 再度、[`PreTrainedModel`]のいずれかのメソッド、例えば[`~PreTrainedModel.save_pretrained`]や [`~PreTrainedModel.push_to_hub`]などを使用できます。次のセクションでは、モデルの重みをコードと一緒に Hugging Face Hub にプッシュする方法を見てみます。 しかし、まずはモデル内に事前学習済みの重みをロードしましょう。 独自のユースケースでは、おそらく独自のデータでカスタムモデルをトレーニングすることになるでしょう。 このチュートリアルではスピードアップのために、resnet50dの事前学習済みバージョンを使用します。 私たちのモデルはそれをラップするだけなので、これらの重みを転送するのは簡単です: ```py import timm pretrained_model = timm.create_model("resnet50d", pretrained=True) resnet50d.model.load_state_dict(pretrained_model.state_dict()) ``` さて、[`~PreTrainedModel.save_pretrained`]または[`~PreTrainedModel.push_to_hub`]を実行したときに、 モデルのコードが保存されるようにする方法を見てみましょう。 ## Sending the code to the Hub <Tip warning={true}> このAPIは実験的であり、次のリリースでわずかな変更があるかもしれません。 </Tip> まず、モデルが`.py`ファイルに完全に定義されていることを確認してください。 ファイルは相対インポートを他のファイルに依存できますが、すべてのファイルが同じディレクトリにある限り(まだこの機能ではサブモジュールはサポートしていません)、問題ありません。 この例では、現在の作業ディレクトリ内に名前が「resnet_model」のフォルダを作成し、その中に`modeling_resnet.py`ファイルと`configuration_resnet.py`ファイルを定義します。 構成ファイルには`ResnetConfig`のコードが含まれ、モデリングファイルには`ResnetModel`と`ResnetModelForImageClassification`のコードが含まれています。 ``` . 
└── resnet_model ├── __init__.py ├── configuration_resnet.py └── modeling_resnet.py ``` `__init__.py`は空であっても問題ありません。Pythonが`resnet_model`をモジュールとして検出できるようにするために存在します。 <Tip warning={true}> ライブラリからモデリングファイルをコピーする場合、ファイルの先頭にあるすべての相対インポートを`transformers`パッケージからインポートに置き換える必要があります。 </Tip> 既存の設定やモデルを再利用(またはサブクラス化)できることに注意してください。 コミュニティとモデルを共有するために、次の手順に従ってください:まず、新しく作成したファイルからResNetモデルと設定をインポートします: ```py from resnet_model.configuration_resnet import ResnetConfig from resnet_model.modeling_resnet import ResnetModel, ResnetModelForImageClassification ``` 次に、`save_pretrained`メソッドを使用してこれらのオブジェクトのコードファイルをコピーし、特定のAutoクラス(特にモデルの場合)に正しく登録するようライブラリに指示する必要があります。次のように実行します: ```py ResnetConfig.register_for_auto_class() ResnetModel.register_for_auto_class("AutoModel") ResnetModelForImageClassification.register_for_auto_class("AutoModelForImageClassification") ``` 注意: 設定については自動クラスを指定する必要はありません(設定用の自動クラスは1つしかなく、[`AutoConfig`]です)が、 モデルについては異なります。カスタムモデルは多くの異なるタスクに適している可能性があるため、 モデルが正確な自動クラスのうちどれに適しているかを指定する必要があります。 次に、前述のように設定とモデルを作成しましょう: ```py resnet50d_config = ResnetConfig(block_type="bottleneck", stem_width=32, stem_type="deep", avg_down=True) resnet50d = ResnetModelForImageClassification(resnet50d_config) pretrained_model = timm.create_model("resnet50d", pretrained=True) resnet50d.model.load_state_dict(pretrained_model.state_dict()) ``` モデルをHubに送信するには、ログインしていることを確認してください。ターミナルで次のコマンドを実行します: ```bash huggingface-cli login ``` またはノートブックから: ```py from huggingface_hub import notebook_login notebook_login() ``` 次に、次のようにして、独自の名前空間にプッシュできます(または、メンバーである組織にプッシュできます): ```py resnet50d.push_to_hub("custom-resnet50d") ``` モデリングの重みとJSON形式の構成に加えて、このフォルダー「custom-resnet50d」内のモデリングおよび構成「.py」ファイルもコピーされ、結果はHubにアップロードされました。結果はこの[model repo](https://huggingface.co/sgugger/custom-resnet50d)で確認できます。 詳細については、[Hubへのプッシュ方法](model_sharing)を参照してください。 ## Using a model with custom code 自動クラスと `from_pretrained` メソッドを使用して、リポジトリ内のカスタムコードファイルと共に任意の構成、モデル、またはトークナイザを使用できます。 Hubにアップロードされるすべてのファイルとコードはマルウェアのスキャンが実施されます(詳細は[Hubセキュリティ](https://huggingface.co/docs/hub/security#malware-scanning)ドキュメンテーションを参照してください)、しかし、依然として悪意のあるコードを実行しないために、モデルコードと作者を確認する必要があります。 `trust_remote_code=True` を設定してカスタムコードを持つモデルを使用できます: ```py from transformers import AutoModelForImageClassification model = AutoModelForImageClassification.from_pretrained("sgugger/custom-resnet50d", trust_remote_code=True) ``` コミットハッシュを「revision」として渡すことも強く推奨されています。これにより、モデルの作者がコードを悪意のある新しい行で更新しなかったことを確認できます(モデルの作者を完全に信頼している場合を除きます)。 ```py commit_hash = "ed94a7c6247d8aedce4647f00f20de6875b5b292" model = AutoModelForImageClassification.from_pretrained( "sgugger/custom-resnet50d", trust_remote_code=True, revision=commit_hash ) ``` モデルリポジトリのコミット履歴をブラウジングする際には、任意のコミットのコミットハッシュを簡単にコピーできるボタンがあります。 ## Registering a model with custom code to the auto classes 🤗 Transformersを拡張するライブラリを作成している場合、独自のモデルを含めるために自動クラスを拡張したい場合があります。 これはコードをHubにプッシュすることとは異なり、ユーザーはカスタムモデルを取得するためにあなたのライブラリをインポートする必要があります (Hubからモデルコードを自動的にダウンロードするのとは対照的です)。 構成に既存のモデルタイプと異なる `model_type` 属性がある限り、またあなたのモデルクラスが適切な `config_class` 属性を持っている限り、 次のようにそれらを自動クラスに追加できます: ```py from transformers import AutoConfig, AutoModel, AutoModelForImageClassification AutoConfig.register("resnet", ResnetConfig) AutoModel.register(ResnetConfig, ResnetModel) AutoModelForImageClassification.register(ResnetConfig, ResnetModelForImageClassification) ``` 注意: `AutoConfig` にカスタム設定を登録する際の最初の引数は、カスタム設定の `model_type` と一致する必要があります。 また、任意の自動モデルクラスにカスタムモデルを登録する際の最初の引数は、それらのモデルの `config_class` と一致する必要があります。
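登録が済めば、他のモデルと同じように Auto クラス経由でカスタムモデルを生成できます。以下はその流れを示す簡単なスケッチです(あくまで仮の例です):

```py
# 仮の例:登録済みのカスタム設定から Auto クラスでモデルを生成する
from transformers import AutoModelForImageClassification

resnet50d_config = ResnetConfig(block_type="bottleneck", stem_width=32, stem_type="deep", avg_down=True)
model = AutoModelForImageClassification.from_config(resnet50d_config)
# => ResnetModelForImageClassification のインスタンスが返ります
```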
transformers/docs/source/ja/custom_models.md/0
{ "file_path": "transformers/docs/source/ja/custom_models.md", "repo_id": "transformers", "token_count": 7503 }
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Processors Transformers ライブラリでは、プロセッサは 2 つの異なる意味を持ちます。 - [Wav2Vec2](../model_doc/wav2vec2) などのマルチモーダル モデルの入力を前処理するオブジェクト (音声とテキスト) または [CLIP](../model_doc/clip) (テキストとビジョン) - 古いバージョンのライブラリで GLUE または SQUAD のデータを前処理するために使用されていたオブジェクトは非推奨になりました。 ## Multi-modal processors マルチモーダル モデルでは、オブジェクトが複数のモダリティ (テキスト、 視覚と音声)。これは、2 つ以上の処理オブジェクトをグループ化するプロセッサーと呼ばれるオブジェクトによって処理されます。 トークナイザー (テキスト モダリティ用)、画像プロセッサー (視覚用)、特徴抽出器 (オーディオ用) など。 これらのプロセッサは、保存およびロード機能を実装する次の基本クラスを継承します。 [[autodoc]] ProcessorMixin ## Deprecated processors すべてのプロセッサは、同じアーキテクチャに従っています。 [`~data.processors.utils.DataProcessor`]。プロセッサは次のリストを返します。 [`~data.processors.utils.InputExample`]。これら [`~data.processors.utils.InputExample`] は次のように変換できます。 [`~data.processors.utils.Input features`] をモデルにフィードします。 [[autodoc]] data.processors.utils.DataProcessor [[autodoc]] data.processors.utils.InputExample [[autodoc]] data.processors.utils.InputFeatures ## GLUE [一般言語理解評価 (GLUE)](https://gluebenchmark.com/) は、 既存の NLU タスクの多様なセットにわたるモデルのパフォーマンス。紙と同時発売された [GLUE: A 自然言語理解のためのマルチタスクベンチマークおよび分析プラットフォーム](https://openreview.net/pdf?id=rJ4km2R5t7) このライブラリは、MRPC、MNLI、MNLI (不一致)、CoLA、SST2、STSB、 QQP、QNLI、RTE、WNLI。 それらのプロセッサは次のとおりです。 - [`~data.processors.utils.MrpcProcessor`] - [`~data.processors.utils.MnliProcessor`] - [`~data.processors.utils.MnliMismatchedProcessor`] - [`~data.processors.utils.Sst2Processor`] - [`~data.processors.utils.StsbProcessor`] - [`~data.processors.utils.QqpProcessor`] - [`~data.processors.utils.QnliProcessor`] - [`~data.processors.utils.RteProcessor`] - [`~data.processors.utils.WnliProcessor`] さらに、次のメソッドを使用して、データ ファイルから値をロードし、それらをリストに変換することができます。 [`~data.processors.utils.InputExample`]。 [[autodoc]] data.processors.glue.glue_convert_examples_to_features ## XNLI [クロスリンガル NLI コーパス (XNLI)](https://www.nyu.edu/projects/bowman/xnli/) は、 言語を超えたテキスト表現の品質。 XNLI は、[*MultiNLI*](http://www.nyu.edu/projects/bowman/multinli/) に基づくクラウドソースのデータセットです。テキストのペアには、15 個のテキスト含意アノテーションがラベル付けされています。 さまざまな言語 (英語などの高リソース言語とスワヒリ語などの低リソース言語の両方を含む)。 論文 [XNLI: Evaluating Cross-lingual Sentence Representations](https://arxiv.org/abs/1809.05053) と同時にリリースされました。 このライブラリは、XNLI データをロードするプロセッサをホストします。 - [`~data.processors.utils.XnliProcessor`] テストセットにはゴールドラベルが付いているため、評価はテストセットで行われますのでご了承ください。 これらのプロセッサを使用する例は、[run_xnli.py](https://github.com/huggingface/transformers/tree/main/examples/pytorch/text-classification/run_xnli.py) スクリプトに示されています。 ## SQuAD [The Stanford Question Answering Dataset (SQuAD)](https://rajpurkar.github.io/SQuAD-explorer//) は、次のベンチマークです。 質問応答に関するモデルのパフォーマンスを評価します。 v1.1 と v2.0 の 2 つのバージョンが利用可能です。最初のバージョン (v1.1) は、論文 [SQuAD: 100,000+ question for Machine Comprehension of Text](https://arxiv.org/abs/1606.05250) とともにリリースされました。 2 番目のバージョン (v2.0) は、論文 [Know What You Don't と同時にリリースされました。 知っておくべき: SQuAD 
の答えられない質問](https://arxiv.org/abs/1806.03822)。 このライブラリは、次の 2 つのバージョンのそれぞれのプロセッサをホストします。 ### Processors それらのプロセッサは次のとおりです。 - [`~data.processors.utils.SquadV1Processor`] - [`~data.processors.utils.SquadV2Processor`] どちらも抽象クラス [`~data.processors.utils.SquadProcessor`] を継承しています。 [[autodoc]] data.processors.squad.SquadProcessor - all さらに、次のメソッドを使用して、SQuAD の例を次の形式に変換できます。 モデルの入力として使用できる [`~data.processors.utils.SquadFeatures`]。 [[autodoc]] data.processors.squad.squad_convert_examples_to_features これらのプロセッサと前述の方法は、データを含むファイルだけでなく、 *tensorflow_datasets* パッケージ。以下に例を示します。 ### Example usage 以下にプロセッサを使用した例と、データ ファイルを使用した変換方法を示します。 ```python # Loading a V2 processor processor = SquadV2Processor() examples = processor.get_dev_examples(squad_v2_data_dir) # Loading a V1 processor processor = SquadV1Processor() examples = processor.get_dev_examples(squad_v1_data_dir) features = squad_convert_examples_to_features( examples=examples, tokenizer=tokenizer, max_seq_length=max_seq_length, doc_stride=args.doc_stride, max_query_length=max_query_length, is_training=not evaluate, ) ``` *tensorflow_datasets* の使用は、データ ファイルを使用するのと同じくらい簡単です。 ```python # tensorflow_datasets only handle Squad V1. tfds_examples = tfds.load("squad") examples = SquadV1Processor().get_examples_from_dataset(tfds_examples, evaluate=evaluate) features = squad_convert_examples_to_features( examples=examples, tokenizer=tokenizer, max_seq_length=max_seq_length, doc_stride=args.doc_stride, max_query_length=max_query_length, is_training=not evaluate, ) ``` これらのプロセッサを使用する別の例は、[run_squad.py](https://github.com/huggingface/transformers/tree/main/examples/legacy/question-answering/run_squad.py) スクリプトに示されています。
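なお、前述の GLUE プロセッサも同様のパターンで使用できます。以下は `MrpcProcessor` と `glue_convert_examples_to_features` を使う簡単なスケッチです(`mrpc_data_dir` は説明用の仮のパス変数です):

```python
# 仮の例:MRPC のデータファイルを読み込み、モデル入力用の特徴量に変換する
from transformers import AutoTokenizer
from transformers.data.processors.glue import MrpcProcessor, glue_convert_examples_to_features

tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-cased")
processor = MrpcProcessor()
examples = processor.get_dev_examples(mrpc_data_dir)  # mrpc_data_dir は仮のパス
features = glue_convert_examples_to_features(examples, tokenizer, max_length=128, task="mrpc")
```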
transformers/docs/source/ja/main_classes/processors.md/0
{ "file_path": "transformers/docs/source/ja/main_classes/processors.md", "repo_id": "transformers", "token_count": 3103 }
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # BertGeneration ## Overview BertGeneration モデルは、次を使用してシーケンス間のタスクに利用できる BERT モデルです。 [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) で提案されている [`EncoderDecoderModel`] タスク、Sascha Rothe、Sishi Nagayan、Aliaksei Severyn 著。 論文の要約は次のとおりです。 *大規模なニューラル モデルの教師なし事前トレーニングは、最近、自然言語処理に革命をもたらしました。による NLP 実践者は、公開されたチェックポイントからウォームスタートして、複数の項目で最先端の技術を推進してきました。 コンピューティング時間を大幅に節約しながらベンチマークを実行します。これまでのところ、主に自然言語に焦点を当ててきました。 タスクを理解する。この論文では、シーケンス生成のための事前トレーニングされたチェックポイントの有効性を実証します。私たちは 公開されている事前トレーニング済み BERT と互換性のある Transformer ベースのシーケンス間モデルを開発しました。 GPT-2 および RoBERTa チェックポイントを使用し、モデルの初期化の有用性について広範な実証研究を実施しました。 エンコーダとデコーダ、これらのチェックポイント。私たちのモデルは、機械翻訳に関する新しい最先端の結果をもたらします。 テキストの要約、文の分割、および文の融合。* ## Usage examples and tips - モデルを [`EncoderDecoderModel`] と組み合わせて使用​​して、2 つの事前トレーニングされたモデルを活用できます。 後続の微調整のための BERT チェックポイント。 ```python >>> # leverage checkpoints for Bert2Bert model... >>> # use BERT's cls token as BOS token and sep token as EOS token >>> encoder = BertGenerationEncoder.from_pretrained("google-bert/bert-large-uncased", bos_token_id=101, eos_token_id=102) >>> # add cross attention layers and use BERT's cls token as BOS token and sep token as EOS token >>> decoder = BertGenerationDecoder.from_pretrained( ... "google-bert/bert-large-uncased", add_cross_attention=True, is_decoder=True, bos_token_id=101, eos_token_id=102 ... ) >>> bert2bert = EncoderDecoderModel(encoder=encoder, decoder=decoder) >>> # create tokenizer... >>> tokenizer = BertTokenizer.from_pretrained("google-bert/bert-large-uncased") >>> input_ids = tokenizer( ... "This is a long article to summarize", add_special_tokens=False, return_tensors="pt" ... ).input_ids >>> labels = tokenizer("This is a short summary", return_tensors="pt").input_ids >>> # train... >>> loss = bert2bert(input_ids=input_ids, decoder_input_ids=labels, labels=labels).loss >>> loss.backward() ``` - 事前トレーニングされた [`EncoderDecoderModel`] もモデル ハブで直接利用できます。 ```python >>> # instantiate sentence fusion model >>> sentence_fuser = EncoderDecoderModel.from_pretrained("google/roberta2roberta_L-24_discofuse") >>> tokenizer = AutoTokenizer.from_pretrained("google/roberta2roberta_L-24_discofuse") >>> input_ids = tokenizer( ... "This is the first sentence. This is the second sentence.", add_special_tokens=False, return_tensors="pt" ... 
).input_ids >>> outputs = sentence_fuser.generate(input_ids) >>> print(tokenizer.decode(outputs[0])) ``` チップ: - [`BertGenerationEncoder`] と [`BertGenerationDecoder`] は、 [`EncoderDecoder`] と組み合わせます。 - 要約、文の分割、文の融合、および翻訳の場合、入力に特別なトークンは必要ありません。 したがって、入力の末尾に EOS トークンを追加しないでください。 このモデルは、[patrickvonplaten](https://huggingface.co/patrickvonplaten) によって提供されました。元のコードは次のとおりです [ここ](https://tfhub.dev/s?module-type=text-generation&subtype=module,placeholder) があります。 ## BertGenerationConfig [[autodoc]] BertGenerationConfig ## BertGenerationTokenizer [[autodoc]] BertGenerationTokenizer - save_vocabulary ## BertGenerationEncoder [[autodoc]] BertGenerationEncoder - forward ## BertGenerationDecoder [[autodoc]] BertGenerationDecoder - forward
transformers/docs/source/ja/model_doc/bert-generation.md/0
{ "file_path": "transformers/docs/source/ja/model_doc/bert-generation.md", "repo_id": "transformers", "token_count": 1974 }
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # ByT5 ## Overview ByT5 モデルは、[ByT5: Towards a token-free future with pre-trained byte-to-byte models](https://arxiv.org/abs/2105.13626) by Linting Xue, Aditya Barua, Noah Constant, Rami Al-Rfou, Sharan Narang, Mihir Kale, Adam Roberts, Colin Raffel. 論文の要約は次のとおりです。 *最も広く使用されている事前トレーニング済み言語モデルは、単語またはサブワード単位に対応するトークンのシーケンスで動作します。 テキストをトークンのシーケンスとしてエンコードするには、トークナイザーが必要です。トークナイザーは通常、 モデル。代わりに生のテキスト (バイトまたは文字) を直接操作するトークンフリー モデルには多くの利点があります。 すぐに使用できるあらゆる言語のテキストを処理でき、ノイズに対してより堅牢であり、技術的負債を最小限に抑えます。 複雑でエラーが発生しやすいテキスト前処理パイプラインを削除します。バイトまたは文字列がトークンより長いため トークンフリー モデルに関する過去の研究では、シーケンスのコストを償却するように設計された新しいモデル アーキテクチャが導入されることがよくありました。 生のテキストを直接操作します。この論文では、標準的な Transformer アーキテクチャが次のようなもので使用できることを示します。 バイトシーケンスを処理するための最小限の変更。パラメータ数の観点からトレードオフを注意深く特徴付けます。 FLOP のトレーニングと推論速度を調べ、バイトレベルのモデルがトークンレベルと競合できることを示します。 対応者。また、バイトレベルのモデルはノイズに対して大幅に堅牢であり、より優れたパフォーマンスを発揮することも示しています。 スペルと発音に敏感なタスク。私たちの貢献の一環として、新しいセットをリリースします。 T5 アーキテクチャに基づいた事前トレーニング済みのバイトレベルの Transformer モデルと、そこで使用されるすべてのコードとデータ 実験。* このモデルは、[patrickvonplaten](https://huggingface.co/patrickvonplaten) によって提供されました。元のコードは次のとおりです [ここ](https://github.com/google-research/byt5) にあります。 <Tip> ByT5 のアーキテクチャは T5v1.1 モデルに基づいています。API リファレンスについては、[T5v1.1 のドキュメント ページ](t5v1.1) を参照してください。彼らは モデルの入力を準備する方法が異なるだけです。以下のコード例を参照してください。 </Tip> ByT5 は教師なしで事前トレーニングされているため、単一タスク中にタスク プレフィックスを使用する利点はありません。 微調整。マルチタスクの微調整を行う場合は、プレフィックスを使用する必要があります。 ## Usage Examples ByT5 は生の UTF-8 バイトで動作するため、トークナイザーなしで使用できます。 ```python >>> from transformers import T5ForConditionalGeneration >>> import torch >>> model = T5ForConditionalGeneration.from_pretrained("google/byt5-small") >>> num_special_tokens = 3 >>> # Model has 3 special tokens which take up the input ids 0,1,2 of ByT5. >>> # => Need to shift utf-8 character encodings by 3 before passing ids to model. >>> input_ids = torch.tensor([list("Life is like a box of chocolates.".encode("utf-8"))]) + num_special_tokens >>> labels = torch.tensor([list("La vie est comme une boîte de chocolat.".encode("utf-8"))]) + num_special_tokens >>> loss = model(input_ids, labels=labels).loss >>> loss.item() 2.66 ``` ただし、バッチ推論とトレーニングの場合は、トークナイザーを使用することをお勧めします。 ```python >>> from transformers import T5ForConditionalGeneration, AutoTokenizer >>> model = T5ForConditionalGeneration.from_pretrained("google/byt5-small") >>> tokenizer = AutoTokenizer.from_pretrained("google/byt5-small") >>> model_inputs = tokenizer( ... ["Life is like a box of chocolates.", "Today is Monday."], padding="longest", return_tensors="pt" ... ) >>> labels_dict = tokenizer( ... ["La vie est comme une boîte de chocolat.", "Aujourd'hui c'est lundi."], padding="longest", return_tensors="pt" ... 
) >>> labels = labels_dict.input_ids >>> loss = model(**model_inputs, labels=labels).loss >>> loss.item() 17.9 ``` [T5](t5) と同様に、ByT5 はスパンマスクノイズ除去タスクでトレーニングされました。しかし、 モデルはキャラクターに直接作用するため、事前トレーニングタスクは少し複雑です 違う。のいくつかの文字を破損してみましょう `"The dog chases a ball in the park."`という文を入力し、ByT5 に予測してもらいます。 わたしたちのため。 ```python >>> from transformers import AutoTokenizer, AutoModelForSeq2SeqLM >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("google/byt5-base") >>> model = AutoModelForSeq2SeqLM.from_pretrained("google/byt5-base") >>> input_ids_prompt = "The dog chases a ball in the park." >>> input_ids = tokenizer(input_ids_prompt).input_ids >>> # Note that we cannot add "{extra_id_...}" to the string directly >>> # as the Byte tokenizer would incorrectly merge the tokens >>> # For ByT5, we need to work directly on the character level >>> # Contrary to T5, ByT5 does not use sentinel tokens for masking, but instead >>> # uses final utf character ids. >>> # UTF-8 is represented by 8 bits and ByT5 has 3 special tokens. >>> # => There are 2**8+2 = 259 input ids and mask tokens count down from index 258. >>> # => mask to "The dog [258]a ball [257]park." >>> input_ids = torch.tensor([input_ids[:8] + [258] + input_ids[14:21] + [257] + input_ids[28:]]) >>> input_ids tensor([[ 87, 107, 104, 35, 103, 114, 106, 35, 258, 35, 100, 35, 101, 100, 111, 111, 257, 35, 115, 100, 117, 110, 49, 1]]) >>> # ByT5 produces only one char at a time so we need to produce many more output characters here -> set `max_length=100`. >>> output_ids = model.generate(input_ids, max_length=100)[0].tolist() >>> output_ids [0, 258, 108, 118, 35, 119, 107, 104, 35, 114, 113, 104, 35, 122, 107, 114, 35, 103, 114, 104, 118, 257, 35, 108, 113, 35, 119, 107, 104, 35, 103, 108, 118, 102, 114, 256, 108, 113, 35, 119, 107, 104, 35, 115, 100, 117, 110, 49, 35, 87, 107, 104, 35, 103, 114, 106, 35, 108, 118, 35, 119, 107, 104, 35, 114, 113, 104, 35, 122, 107, 114, 35, 103, 114, 104, 118, 35, 100, 35, 101, 100, 111, 111, 35, 108, 113, 255, 35, 108, 113, 35, 119, 107, 104, 35, 115, 100, 117, 110, 49] >>> # ^- Note how 258 descends to 257, 256, 255 >>> # Now we need to split on the sentinel tokens, let's write a short loop for this >>> output_ids_list = [] >>> start_token = 0 >>> sentinel_token = 258 >>> while sentinel_token in output_ids: ... split_idx = output_ids.index(sentinel_token) ... output_ids_list.append(output_ids[start_token:split_idx]) ... start_token = split_idx ... sentinel_token -= 1 >>> output_ids_list.append(output_ids[start_token:]) >>> output_string = tokenizer.batch_decode(output_ids_list) >>> output_string ['<pad>', 'is the one who does', ' in the disco', 'in the park. The dog is the one who does a ball in', ' in the park.'] ``` ## ByT5Tokenizer [[autodoc]] ByT5Tokenizer 詳細については、[`ByT5Tokenizer`] を参照してください。
transformers/docs/source/ja/model_doc/byt5.md/0
{ "file_path": "transformers/docs/source/ja/model_doc/byt5.md", "repo_id": "transformers", "token_count": 3268 }
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # CTRL <div class="flex flex-wrap space-x-1"> <a href="https://huggingface.co/models?filter=Salesforce/ctrl"> <img alt="Models" src="https://img.shields.io/badge/All_model_pages-ctrl-blueviolet"> </a> <a href="https://huggingface.co/spaces/docs-demos/tiny-ctrl"> <img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue"> </a> </div> ## Overview CTRL モデルは、Nitish Shirish Keskar*、Bryan McCann*、Lav R. Varshney、Caiming Xiong, Richard Socher によって [CTRL: A Conditional Transformer Language Model for Controllable Generation](https://arxiv.org/abs/1909.05858) で提案されました。 リチャード・ソーチャー。これは、非常に大規模なコーパスの言語モデリングを使用して事前トレーニングされた因果的 (一方向) トランスフォーマーです 最初のトークンが制御コード (リンク、書籍、Wikipedia など) として予約されている、約 140 GB のテキスト データ。 論文の要約は次のとおりです。 *大規模な言語モデルは有望なテキスト生成機能を示していますが、ユーザーは特定の言語モデルを簡単に制御できません 生成されたテキストの側面。 16 億 3,000 万パラメータの条件付きトランスフォーマー言語モデルである CTRL をリリースします。 スタイル、コンテンツ、タスク固有の動作を制御する制御コードを条件付けるように訓練されています。制御コードは 生のテキストと自然に共生する構造から派生し、教師なし学習の利点を維持しながら、 テキスト生成をより明示的に制御できるようになります。これらのコードを使用すると、CTRL でどの部分が予測されるのかを予測することもできます。 トレーニング データにはシーケンスが与えられる可能性が最も高くなります。これにより、大量のデータを分析するための潜在的な方法が提供されます。 モデルベースのソース帰属を介して。* このモデルは、[keskarnitishr](https://huggingface.co/keskarnitishr) によって提供されました。元のコードが見つかる [こちら](https://github.com/salesforce/Salesforce/ctrl)。 ## Usage tips - CTRL は制御コードを利用してテキストを生成します。生成を特定の単語や文で開始する必要があります。 またはリンクして一貫したテキストを生成します。 [元の実装](https://github.com/salesforce/Salesforce/ctrl) を参照してください。 詳しくは。 - CTRL は絶対位置埋め込みを備えたモデルであるため、通常は入力を右側にパディングすることをお勧めします。 左。 - CTRL は因果言語モデリング (CLM) の目的でトレーニングされているため、次の予測に強力です。 シーケンス内のトークン。この機能を利用すると、CTRL は構文的に一貫したテキストを生成できるようになります。 *run_generation.py* サンプル スクリプトで確認できます。 - PyTorch モデルは、以前に計算されたキーと値のアテンション ペアである`past_key_values`を入力として受け取ることができます。 TensorFlow モデルは`past`を入力として受け入れます。 `past_key_values`値を使用すると、モデルが再計算されなくなります。 テキスト生成のコンテキストで事前に計算された値。 [`forward`](model_doc/ctrl#transformers.CTRLModel.forward) を参照してください。 この引数の使用法の詳細については、メソッドを参照してください。 ## Resources - [テキスト分類タスクガイド](../tasks/sequence_classification) - [因果言語モデリング タスク ガイド](../tasks/language_modeling) ## CTRLConfig [[autodoc]] CTRLConfig ## CTRLTokenizer [[autodoc]] CTRLTokenizer - save_vocabulary <frameworkcontent> <pt> ## CTRLModel [[autodoc]] CTRLModel - forward ## CTRLLMHeadModel [[autodoc]] CTRLLMHeadModel - forward ## CTRLForSequenceClassification [[autodoc]] CTRLForSequenceClassification - forward </pt> <tf> ## TFCTRLModel [[autodoc]] TFCTRLModel - call ## TFCTRLLMHeadModel [[autodoc]] TFCTRLLMHeadModel - call ## TFCTRLForSequenceClassification [[autodoc]] TFCTRLForSequenceClassification - call </tf> </frameworkcontent>
transformers/docs/source/ja/model_doc/ctrl.md/0
{ "file_path": "transformers/docs/source/ja/model_doc/ctrl.md", "repo_id": "transformers", "token_count": 2127 }
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 推論のための多言語モデル [[open-in-colab]] 🤗 Transformers にはいくつかの多言語モデルがあり、それらの推論の使用方法は単一言語モデルとは異なります。ただし、多言語モデルの使用方法がすべて異なるわけではありません。 [google-bert/bert-base-multilingual-uncased](https://huggingface.co/google-bert/bert-base-multilingual-uncased) などの一部のモデルは、単一言語モデルと同様に使用できます。 このガイドでは、推論のために使用方法が異なる多言語モデルをどのように使うかを示します。 ## XLM XLM には10の異なるチェックポイントがあり、そのうちの1つだけが単一言語です。 残りの9つのモデルチェックポイントは、言語埋め込みを使用するチェックポイントと使用しないチェックポイントの2つのカテゴリに分けることができます。 ### 言語の埋め込みがある XLM 次の XLM モデルは、言語の埋め込みを使用して、推論で使用される言語を指定します。 - `FacebookAI/xlm-mlm-ende-1024` (マスク化された言語モデリング、英語-ドイツ語) - `FacebookAI/xlm-mlm-enfr-1024` (マスク化された言語モデリング、英語-フランス語) - `FacebookAI/xlm-mlm-enro-1024` (マスク化された言語モデリング、英語-ルーマニア語) - `FacebookAI/xlm-mlm-xnli15-1024` (マスク化された言語モデリング、XNLI 言語) - `FacebookAI/xlm-mlm-tlm-xnli15-1024` (マスク化された言語モデリング + 翻訳 + XNLI 言語) - `FacebookAI/xlm-clm-enfr-1024` (因果言語モデリング、英語-フランス語) - `FacebookAI/xlm-clm-ende-1024` (因果言語モデリング、英語-ドイツ語) 言語の埋め込みは、モデルに渡される `input_ids` と同じ形状のテンソルとして表されます。 これらのテンソルの値は、使用される言語に依存し、トークナイザーの `lang2id` および `id2lang` 属性によって識別されます。 この例では、`FacebookAI/xlm-clm-enfr-1024` チェックポイントをロードします (因果言語モデリング、英語-フランス語)。 ```py >>> import torch >>> from transformers import XLMTokenizer, XLMWithLMHeadModel >>> tokenizer = XLMTokenizer.from_pretrained("FacebookAI/xlm-clm-enfr-1024") >>> model = XLMWithLMHeadModel.from_pretrained("FacebookAI/xlm-clm-enfr-1024") ``` トークナイザーの `lang2id` 属性は、このモデルの言語とその ID を表示します。 ```py >>> print(tokenizer.lang2id) {'en': 0, 'fr': 1} ``` 次に、入力例を作成します。 ```py >>> input_ids = torch.tensor([tokenizer.encode("Wikipedia was used to")]) # batch size of 1 ``` 言語 ID を `en` に設定し、それを使用して言語の埋め込みを定義します。 言語の埋め込みは、英語の言語 ID であるため、`0` で埋められたテンソルです。 このテンソルは `input_ids` と同じサイズにする必要があります。 ```py >>> language_id = tokenizer.lang2id["en"] # 0 >>> langs = torch.tensor([language_id] * input_ids.shape[1]) # torch.tensor([0, 0, 0, ..., 0]) >>> # We reshape it to be of size (batch_size, sequence_length) >>> langs = langs.view(1, -1) # is now of shape [1, sequence_length] (we have a batch size of 1) ``` これで、`input_ids` と言語の埋め込みをモデルに渡すことができます。 ```py >>> outputs = model(input_ids, langs=langs) ``` [run_generation.py](https://github.com/huggingface/transformers/tree/main/examples/pytorch/text-generation/run_generation.py) スクリプトは、`xlm-clm` チェックポイントを使用して、言語が埋め込まれたテキストを生成できます。 ### 言語の埋め込みがないXLM 次の XLM モデルは、推論中に言語の埋め込みを必要としません。 - `FacebookAI/xlm-mlm-17-1280` (マスク化された言語モデリング、17の言語) - `FacebookAI/xlm-mlm-100-1280` (マスク化された言語モデリング、100の言語) これらのモデルは、以前の XLM チェックポイントとは異なり、一般的な文の表現に使用されます。 ## BERT 以下の BERT モデルは、多言語タスクに使用できます。 - `google-bert/bert-base-multilingual-uncased` (マスク化された言語モデリング + 次の文の予測、102の言語) - `google-bert/bert-base-multilingual-cased` (マスク化された言語モデリング + 次の文の予測、104の言語) これらのモデルは、推論中に言語の埋め込みを必要としません。 文脈から言語を識別し、それに応じて推測する必要があります。 ## XLM-RoBERTa 次の XLM-RoBERTa モデルは、多言語タスクに使用できます。 - 
`FacebookAI/xlm-roberta-base` (マスク化された言語モデリング、100の言語) - `FacebookAI/xlm-roberta-large` (マスク化された言語モデリング、100の言語) XLM-RoBERTa は、100の言語で新しく作成およびクリーニングされた2.5 TB の CommonCrawl データでトレーニングされました。 これは、分類、シーケンスのラベル付け、質問応答などのダウンストリームタスクで、mBERT や XLM などの以前にリリースされた多言語モデルを大幅に改善します。 ## M2M100 次の M2M100 モデルは、多言語翻訳に使用できます。 - `facebook/m2m100_418M` (翻訳) - `facebook/m2m100_1.2B` (翻訳) この例では、`facebook/m2m100_418M` チェックポイントをロードして、中国語から英語に翻訳します。 トークナイザーでソース言語を設定できます。 ```py >>> from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer >>> en_text = "Do not meddle in the affairs of wizards, for they are subtle and quick to anger." >>> chinese_text = "不要插手巫師的事務, 因為他們是微妙的, 很快就會發怒." >>> tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="zh") >>> model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M") ``` テキストをトークン化します。 ```py >>> encoded_zh = tokenizer(chinese_text, return_tensors="pt") ``` M2M100 は、最初に生成されたトークンとしてターゲット言語 ID を強制的にターゲット言語に翻訳します。 英語に翻訳するには、`generate` メソッドで `forced_bos_token_id` を `en` に設定します。 ```py >>> generated_tokens = model.generate(**encoded_zh, forced_bos_token_id=tokenizer.get_lang_id("en")) >>> tokenizer.batch_decode(generated_tokens, skip_special_tokens=True) 'Do not interfere with the matters of the witches, because they are delicate and will soon be angry.' ``` ## MBart 多言語翻訳には、次の MBart モデルを使用できます。 - `facebook/mbart-large-50-one-to-many-mmt` (One-to-many multilingual machine translation, 50 languages) - `facebook/mbart-large-50-many-to-many-mmt` (Many-to-many multilingual machine translation, 50 languages) - `facebook/mbart-large-50-many-to-one-mmt` (Many-to-one multilingual machine translation, 50 languages) - `facebook/mbart-large-50` (Multilingual translation, 50 languages) - `facebook/mbart-large-cc25` この例では、`facebook/mbart-large-50-many-to-many-mmt` チェックポイントをロードして、フィンランド語を英語に翻訳します。トークナイザーでソース言語を設定できます。 ```py >>> from transformers import AutoTokenizer, AutoModelForSeq2SeqLM >>> en_text = "Do not meddle in the affairs of wizards, for they are subtle and quick to anger." >>> fi_text = "Älä sekaannu velhojen asioihin, sillä ne ovat hienovaraisia ja nopeasti vihaisia." >>> tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-50-many-to-many-mmt", src_lang="fi_FI") >>> model = AutoModelForSeq2SeqLM.from_pretrained("facebook/mbart-large-50-many-to-many-mmt") ``` テキストをトークン化します。 ```py >>> encoded_en = tokenizer(en_text, return_tensors="pt") ``` MBart は、最初に生成されたトークンとしてターゲット言語 ID を強制的にターゲット言語に翻訳します。 英語に翻訳するには、`generate` メソッドで `forced_bos_token_id` を `en` に設定します。 ```py >>> generated_tokens = model.generate(**encoded_en, forced_bos_token_id=tokenizer.lang_code_to_id("en_XX")) >>> tokenizer.batch_decode(generated_tokens, skip_special_tokens=True) "Don't interfere with the wizard's affairs, because they are subtle, will soon get angry." ``` `facebook/mbart-large-50-many-to-one-mmt` チェックポイントを使用している場合、最初に生成されたトークンとしてターゲット言語 ID を強制する必要はありません。それ以外の場合、使用方法は同じです。
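最後に述べたとおり、`facebook/mbart-large-50-many-to-one-mmt` ではターゲットが常に英語になるため、`forced_bos_token_id` の指定は不要です。先ほどの `fi_text` を使った場合のスケッチは次のとおりです(出力は省略します)。

```py
>>> from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

>>> tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-50-many-to-one-mmt", src_lang="fi_FI")
>>> model = AutoModelForSeq2SeqLM.from_pretrained("facebook/mbart-large-50-many-to-one-mmt")

>>> encoded_fi = tokenizer(fi_text, return_tensors="pt")
>>> # the target language is always English, so `forced_bos_token_id` is not needed
>>> generated_tokens = model.generate(**encoded_fi)
>>> tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
```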
transformers/docs/source/ja/multilingual.md/0
{ "file_path": "transformers/docs/source/ja/multilingual.md", "repo_id": "transformers", "token_count": 4144 }
<!--- Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Performance and Scalability 大規模なトランスフォーマーモデルのトレーニングおよび本番環境への展開はさまざまな課題を提起します。 トレーニング中には、モデルが利用可能なGPUメモリよりも多くを必要としたり、トレーニング速度が遅かったりする可能性があります。 デプロイフェーズでは、モデルが本番環境で必要なスループットを処理するのに苦労することがあります。 このドキュメンテーションは、これらの課題を克服し、ユースケースに最適な設定を見つけるのに役立つことを目的としています。 ガイドはトレーニングと推論のセクションに分かれており、それぞれ異なる課題と解決策が存在します。 各セクション内には、トレーニング用のシングルGPU対マルチGPU、推論用のCPU対GPUなど、異なるハードウェア構成用の別々のガイドが用意されています。 このドキュメントを出発点として、シナリオに合った方法に進むための情報源としてご利用ください。 ## Training 大規模なトランスフォーマーモデルを効率的にトレーニングするには、GPUやTPUなどのアクセラレータが必要です。 最も一般的なケースは、シングルGPUがある場合です。シングルGPUでのトレーニング効率を最適化するための一般的なアプローチを学ぶには、以下を参照してください。 * [シングルGPUでの効率的なトレーニングのための方法とツール](perf_train_gpu_one): GPUメモリの効果的な利用、トレーニングの高速化などを支援する共通のアプローチを学ぶためにここから始めてください。 * [マルチGPUトレーニングセクション](perf_train_gpu_many): マルチGPU環境に適用されるデータ、テンソル、パイプライン並列性など、さらなる最適化方法について詳細に学びます。 * [CPUトレーニングセクション](perf_train_cpu): CPU上での混合精度トレーニングについて学びます。 * [複数CPUでの効率的なトレーニング](perf_train_cpu_many): 分散CPUトレーニングについて学びます。 * [TensorFlowでTPUを使用したトレーニング](perf_train_tpu_tf): TPUに慣れていない場合は、TPUでのトレーニングとXLAの使用についてのセクションを参照してください。 * [トレーニングのためのカスタムハードウェア](perf_hardware): 独自のディープラーニング環境を構築する際のヒントやトリックを見つけます。 * [Trainer APIを使用したハイパーパラメーター検索](hpo_train) ## Inference 本番環境で大規模なモデルを効率的に推論することは、それらをトレーニングすることと同じくらい難しいことがあります。 以下のセクションでは、CPUおよびシングル/マルチGPU環境で推論を実行する手順について説明します。 * [シングルCPUでの推論](perf_infer_cpu) * [シングルGPUでの推論](perf_infer_gpu_one) * [マルチGPU推論](perf_infer_gpu_many) * [TensorFlowモデルのXLA統合](tf_xla) ## Training and inference モデルをトレーニングするか、それを使用して推論を実行するかに関係なく適用されるテクニック、ヒント、トリックがここにあります。 * [大規模モデルのインスタンス化](big_models) * [パフォーマンスの問題のトラブルシューティング](debugging) ## Contribute このドキュメントはまだ完全ではなく、さらに追加する必要がある項目がたくさんあります。 追加や訂正が必要な場合は、遠慮せずにPRをオープンするか、詳細を議論するためにIssueを開始してください。 AがBよりも優れているという貢献を行う際には、再現可能なベンチマークやその情報の出典へのリンクを含めてみてください(あなた自身の情報である場合を除く)。
transformers/docs/source/ja/performance.md/0
{ "file_path": "transformers/docs/source/ja/performance.md", "repo_id": "transformers", "token_count": 2063 }
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Image classification [[open-in-colab]] <Youtube id="tjAIM7BOYhw"/> 画像分類では、画像にラベルまたはクラスを割り当てます。テキストや音声の分類とは異なり、入力は 画像を構成するピクセル値。損傷の検出など、画像分類には多くの用途があります 自然災害の後、作物の健康状態を監視したり、病気の兆候がないか医療画像をスクリーニングしたりするのに役立ちます。 このガイドでは、次の方法を説明します。 1. [Food-101](https://huggingface.co/datasets/food101) データセットの [ViT](model_doc/vit) を微調整して、画像内の食品を分類します。 2. 微調整したモデルを推論に使用します。 <Tip> このタスクと互換性のあるすべてのアーキテクチャとチェックポイントを確認するには、[タスクページ](https://huggingface.co/tasks/image-classification) を確認することをお勧めします。 </Tip> 始める前に、必要なライブラリがすべてインストールされていることを確認してください。 ```bash pip install transformers datasets evaluate ``` Hugging Face アカウントにログインして、モデルをアップロードしてコミュニティと共有することをお勧めします。プロンプトが表示されたら、トークンを入力してログインします。 ```py >>> from huggingface_hub import notebook_login >>> notebook_login() ``` ## Load Food-101 dataset Datasets、🤗 データセット ライブラリから Food-101 データセットの小さいサブセットを読み込みます。これにより、次の機会が得られます 完全なデータセットのトレーニングにさらに時間を費やす前に、実験してすべてが機能することを確認してください。 ```py >>> from datasets import load_dataset >>> food = load_dataset("food101", split="train[:5000]") ``` [`~datasets.Dataset.train_test_split`] メソッドを使用して、データセットの `train` 分割をトレイン セットとテスト セットに分割します。 ```py >>> food = food.train_test_split(test_size=0.2) ``` 次に、例を見てみましょう。 ```py >>> food["train"][0] {'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=512x512 at 0x7F52AFC8AC50>, 'label': 79} ``` データセット内の各例には 2 つのフィールドがあります。 - `image`: 食品の PIL 画像 - `label`: 食品のラベルクラス モデルがラベル ID からラベル名を取得しやすくするために、ラベル名をマップする辞書を作成します。 整数への変換、またはその逆: ```py >>> labels = food["train"].features["label"].names >>> label2id, id2label = dict(), dict() >>> for i, label in enumerate(labels): ... label2id[label] = str(i) ... id2label[str(i)] = label ``` これで、ラベル ID をラベル名に変換できるようになりました。 ```py >>> id2label[str(79)] 'prime_rib' ``` ## Preprocess 次のステップでは、ViT 画像プロセッサをロードして画像をテンソルに処理します。 ```py >>> from transformers import AutoImageProcessor >>> checkpoint = "google/vit-base-patch16-224-in21k" >>> image_processor = AutoImageProcessor.from_pretrained(checkpoint) ``` <frameworkcontent> <pt> いくつかの画像変換を画像に適用して、モデルの過学習に対する堅牢性を高めます。ここでは torchvision の [`transforms`](https://pytorch.org/vision/stable/transforms.html) モジュールを使用しますが、任意の画像ライブラリを使用することもできます。 画像のランダムな部分をトリミングし、サイズを変更し、画像の平均と標準偏差で正規化します。 ```py >>> from torchvision.transforms import RandomResizedCrop, Compose, Normalize, ToTensor >>> normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std) >>> size = ( ... image_processor.size["shortest_edge"] ... if "shortest_edge" in image_processor.size ... else (image_processor.size["height"], image_processor.size["width"]) ... ) >>> _transforms = Compose([RandomResizedCrop(size), ToTensor(), normalize]) ``` 次に、変換を適用し、画像の `pixel_values` (モデルへの入力) を返す前処理関数を作成します。 ```py >>> def transforms(examples): ... 
examples["pixel_values"] = [_transforms(img.convert("RGB")) for img in examples["image"]] ... del examples["image"] ... return examples ``` データセット全体に前処理関数を適用するには、🤗 Datasets [`~datasets.Dataset.with_transform`] メソッドを使用します。変換は、データセットの要素を読み込むときにオンザフライで適用されます。 ```py >>> food = food.with_transform(transforms) ``` 次に、[`DefaultDataCollat​​or`] を使用してサンプルのバッチを作成します。 🤗 Transformers の他のデータ照合器とは異なり、`DefaultDataCollat​​or` はパディングなどの追加の前処理を適用しません。 ```py >>> from transformers import DefaultDataCollator >>> data_collator = DefaultDataCollator() ``` </pt> </frameworkcontent> <frameworkcontent> <tf> 過剰適合を回避し、モデルをより堅牢にするために、データセットのトレーニング部分にデータ拡張を追加します。 ここでは、Keras 前処理レイヤーを使用してトレーニング データの変換 (データ拡張を含む) を定義します。 検証データの変換 (中央のトリミング、サイズ変更、正規化のみ)。 `tf.image` または 他のライブラリでも構いません。 ```py >>> from tensorflow import keras >>> from tensorflow.keras import layers >>> size = (image_processor.size["height"], image_processor.size["width"]) >>> train_data_augmentation = keras.Sequential( ... [ ... layers.RandomCrop(size[0], size[1]), ... layers.Rescaling(scale=1.0 / 127.5, offset=-1), ... layers.RandomFlip("horizontal"), ... layers.RandomRotation(factor=0.02), ... layers.RandomZoom(height_factor=0.2, width_factor=0.2), ... ], ... name="train_data_augmentation", ... ) >>> val_data_augmentation = keras.Sequential( ... [ ... layers.CenterCrop(size[0], size[1]), ... layers.Rescaling(scale=1.0 / 127.5, offset=-1), ... ], ... name="val_data_augmentation", ... ) ``` 次に、一度に 1 つの画像ではなく、画像のバッチに適切な変換を適用する関数を作成します。 ```py >>> import numpy as np >>> import tensorflow as tf >>> from PIL import Image >>> def convert_to_tf_tensor(image: Image): ... np_image = np.array(image) ... tf_image = tf.convert_to_tensor(np_image) ... # `expand_dims()` is used to add a batch dimension since ... # the TF augmentation layers operates on batched inputs. ... return tf.expand_dims(tf_image, 0) >>> def preprocess_train(example_batch): ... """Apply train_transforms across a batch.""" ... images = [ ... train_data_augmentation(convert_to_tf_tensor(image.convert("RGB"))) for image in example_batch["image"] ... ] ... example_batch["pixel_values"] = [tf.transpose(tf.squeeze(image)) for image in images] ... return example_batch ... def preprocess_val(example_batch): ... """Apply val_transforms across a batch.""" ... images = [ ... val_data_augmentation(convert_to_tf_tensor(image.convert("RGB"))) for image in example_batch["image"] ... ] ... example_batch["pixel_values"] = [tf.transpose(tf.squeeze(image)) for image in images] ... return example_batch ``` 🤗 データセット [`~datasets.Dataset.set_transform`] を使用して、その場で変換を適用します。 ```py food["train"].set_transform(preprocess_train) food["test"].set_transform(preprocess_val) ``` 最後の前処理ステップとして、`DefaultDataCollat​​or`を使用してサンプルのバッチを作成します。 🤗 Transformers の他のデータ照合機能とは異なり、 `DefaultDataCollat​​or` は、パディングなどの追加の前処理を適用しません。 ```py >>> from transformers import DefaultDataCollator >>> data_collator = DefaultDataCollator(return_tensors="tf") ``` </tf> </frameworkcontent> ## Evaluate トレーニング中にメトリクスを含めると、多くの場合、モデルのパフォーマンスを評価するのに役立ちます。すぐにロードできます 🤗 [Evaluate](https://huggingface.co/docs/evaluate/index) ライブラリを使用した評価方法。このタスクでは、ロードします [accuracy](https://huggingface.co/spaces/evaluate-metric/accuracy) 指標 (詳細については、🤗 評価 [クイック ツアー](https://huggingface.co/docs/evaluate/a_quick_tour) を参照してくださいメトリクスをロードして計算する方法): ```py >>> import evaluate >>> accuracy = evaluate.load("accuracy") ``` 次に、予測とラベルを [`~evaluate.EvaluationModule.compute`] に渡して精度を計算する関数を作成します。 ```py >>> import numpy as np >>> def compute_metrics(eval_pred): ... predictions, labels = eval_pred ... 
predictions = np.argmax(predictions, axis=1) ... return accuracy.compute(predictions=predictions, references=labels) ``` これで `compute_metrics`関数の準備が整いました。トレーニングを設定するときにこの関数に戻ります。 ## Train <frameworkcontent> <pt> <Tip> [`Trainer`] を使用したモデルの微調整に慣れていない場合は、[こちら](../training#train-with-pytorch-trainer) の基本的なチュートリアルをご覧ください。 </Tip> これでモデルのトレーニングを開始する準備が整いました。 [`AutoModelForImageClassification`] を使用して ViT をロードします。ラベルの数と予想されるラベルの数、およびラベル マッピングを指定します。 ```py >>> from transformers import AutoModelForImageClassification, TrainingArguments, Trainer >>> model = AutoModelForImageClassification.from_pretrained( ... checkpoint, ... num_labels=len(labels), ... id2label=id2label, ... label2id=label2id, ... ) ``` この時点で残っているステップは 3 つだけです。 1. [`TrainingArguments`] でトレーニング ハイパーパラメータを定義します。 `image` 列が削除されるため、未使用の列を削除しないことが重要です。 `image` 列がないと、`pixel_values` を作成できません。この動作を防ぐには、`remove_unused_columns=False`を設定してください。他に必要なパラメータは、モデルの保存場所を指定する `output_dir` だけです。 `push_to_hub=True`を設定して、このモデルをハブにプッシュします (モデルをアップロードするには、Hugging Face にサインインする必要があります)。各エポックの終了時に、[`Trainer`] は精度を評価し、トレーニング チェックポイントを保存します。 2. トレーニング引数を、モデル、データセット、トークナイザー、データ照合器、および `compute_metrics` 関数とともに [`Trainer`] に渡します。 3. [`~Trainer.train`] を呼び出してモデルを微調整します。 ```py >>> training_args = TrainingArguments( ... output_dir="my_awesome_food_model", ... remove_unused_columns=False, ... eval_strategy="epoch", ... save_strategy="epoch", ... learning_rate=5e-5, ... per_device_train_batch_size=16, ... gradient_accumulation_steps=4, ... per_device_eval_batch_size=16, ... num_train_epochs=3, ... warmup_ratio=0.1, ... logging_steps=10, ... load_best_model_at_end=True, ... metric_for_best_model="accuracy", ... push_to_hub=True, ... ) >>> trainer = Trainer( ... model=model, ... args=training_args, ... data_collator=data_collator, ... train_dataset=food["train"], ... eval_dataset=food["test"], ... processing_class=image_processor, ... compute_metrics=compute_metrics, ... ) >>> trainer.train() ``` トレーニングが完了したら、 [`~transformers.Trainer.push_to_hub`] メソッドを使用してモデルをハブに共有し、誰もがモデルを使用できるようにします。 ```py >>> trainer.push_to_hub() ``` </pt> </frameworkcontent> <frameworkcontent> <tf> <Tip> Keras を使用したモデルの微調整に慣れていない場合は、まず [基本チュートリアル](./training#train-a-tensorflow-model-with-keras) を確認してください。 </Tip> TensorFlow でモデルを微調整するには、次の手順に従います。 1. トレーニングのハイパーパラメータを定義し、オプティマイザーと学習率スケジュールを設定します。 2. 事前トレーニングされたモデルをインスタンス化します。 3. 🤗 データセットを `tf.data.Dataset` に変換します。 4. モデルをコンパイルします。 5. コールバックを追加し、`fit()` メソッドを使用してトレーニングを実行します。 6. モデルを 🤗 Hub にアップロードしてコミュニティと共有します。 まず、ハイパーパラメーター、オプティマイザー、学習率スケジュールを定義します。 ```py >>> from transformers import create_optimizer >>> batch_size = 16 >>> num_epochs = 5 >>> num_train_steps = len(food["train"]) * num_epochs >>> learning_rate = 3e-5 >>> weight_decay_rate = 0.01 >>> optimizer, lr_schedule = create_optimizer( ... init_lr=learning_rate, ... num_train_steps=num_train_steps, ... weight_decay_rate=weight_decay_rate, ... num_warmup_steps=0, ... ) ``` 次に、ラベル マッピングとともに [`TFAutoModelForImageClassification`] を使用して ViT を読み込みます。 ```py >>> from transformers import TFAutoModelForImageClassification >>> model = TFAutoModelForImageClassification.from_pretrained( ... checkpoint, ... id2label=id2label, ... label2id=label2id, ... ) ``` Convert your datasets to the `tf.data.Dataset` format using the [`~datasets.Dataset.to_tf_dataset`] and your `data_collator`: ```py >>> # converting our train dataset to tf.data.Dataset >>> tf_train_dataset = food["train"].to_tf_dataset( ... columns="pixel_values", label_cols="label", shuffle=True, batch_size=batch_size, collate_fn=data_collator ... 
) >>> # converting our test dataset to tf.data.Dataset >>> tf_eval_dataset = food["test"].to_tf_dataset( ... columns="pixel_values", label_cols="label", shuffle=True, batch_size=batch_size, collate_fn=data_collator ... ) ``` `compile()` を使用してトレーニング用にモデルを設定します。 ```py >>> from tensorflow.keras.losses import SparseCategoricalCrossentropy >>> loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) >>> model.compile(optimizer=optimizer, loss=loss) ``` 予測から精度を計算し、モデルを 🤗 ハブにプッシュするには、[Keras callbacks](../main_classes/keras_callbacks) を使用します。 `compute_metrics` 関数を [KerasMetricCallback](../main_classes/keras_callbacks#transformers.KerasMetricCallback) に渡します。 [PushToHubCallback](../main_classes/keras_callbacks#transformers.PushToHubCallback) を使用してモデルをアップロードします。 ```py >>> from transformers.keras_callbacks import KerasMetricCallback, PushToHubCallback >>> metric_callback = KerasMetricCallback(metric_fn=compute_metrics, eval_dataset=tf_eval_dataset) >>> push_to_hub_callback = PushToHubCallback( ... output_dir="food_classifier", ... tokenizer=image_processor, ... save_strategy="no", ... ) >>> callbacks = [metric_callback, push_to_hub_callback] ``` ついに、モデルをトレーニングする準備が整いました。トレーニングおよび検証データセット、エポック数、 モデルを微調整するためのコールバック: ```py >>> model.fit(tf_train_dataset, validation_data=tf_eval_dataset, epochs=num_epochs, callbacks=callbacks) Epoch 1/5 250/250 [==============================] - 313s 1s/step - loss: 2.5623 - val_loss: 1.4161 - accuracy: 0.9290 Epoch 2/5 250/250 [==============================] - 265s 1s/step - loss: 0.9181 - val_loss: 0.6808 - accuracy: 0.9690 Epoch 3/5 250/250 [==============================] - 252s 1s/step - loss: 0.3910 - val_loss: 0.4303 - accuracy: 0.9820 Epoch 4/5 250/250 [==============================] - 251s 1s/step - loss: 0.2028 - val_loss: 0.3191 - accuracy: 0.9900 Epoch 5/5 250/250 [==============================] - 238s 949ms/step - loss: 0.1232 - val_loss: 0.3259 - accuracy: 0.9890 ``` おめでとう!モデルを微調整し、🤗 Hub で共有しました。これで推論に使用できるようになりました。 </tf> </frameworkcontent> <Tip> 画像分類用のモデルを微調整する方法の詳細な例については、対応する [PyTorch ノートブック](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb) </Tip> ## Inference モデルを微調整したので、それを推論に使用できるようになりました。 推論を実行したい画像を読み込みます。 ```py >>> ds = load_dataset("food101", split="validation[:10]") >>> image = ds["image"][0] ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png" alt="image of beignets"/> </div> 推論用に微調整されたモデルを試す最も簡単な方法は、それを [`pipeline`] で使用することです。モデルを使用して画像分類用の`pipeline`をインスタンス化し、それに画像を渡します。 ```py >>> from transformers import pipeline >>> classifier = pipeline("image-classification", model="my_awesome_food_model") >>> classifier(image) [{'score': 0.31856709718704224, 'label': 'beignets'}, {'score': 0.015232225880026817, 'label': 'bruschetta'}, {'score': 0.01519392803311348, 'label': 'chicken_wings'}, {'score': 0.013022331520915031, 'label': 'pork_chop'}, {'score': 0.012728818692266941, 'label': 'prime_rib'}] ``` 必要に応じて、`pipeline`の結果を手動で複製することもできます。 <frameworkcontent> <pt> 画像プロセッサをロードして画像を前処理し、`input`を PyTorch テンソルとして返します。 ```py >>> from transformers import AutoImageProcessor >>> import torch >>> image_processor = AutoImageProcessor.from_pretrained("my_awesome_food_model") >>> inputs = image_processor(image, return_tensors="pt") ``` 入力をモデルに渡し、ロジットを返します。 ```py >>> from transformers import AutoModelForImageClassification >>> model = 
AutoModelForImageClassification.from_pretrained("my_awesome_food_model") >>> with torch.no_grad(): ... logits = model(**inputs).logits ``` 最も高い確率で予測されたラベルを取得し、モデルの `id2label` マッピングを使用してラベルに変換します。 ```py >>> predicted_label = logits.argmax(-1).item() >>> model.config.id2label[predicted_label] 'beignets' ``` </pt> </frameworkcontent> <frameworkcontent> <tf> 画像プロセッサをロードして画像を前処理し、`input`を TensorFlow テンソルとして返します。 ```py >>> from transformers import AutoImageProcessor >>> image_processor = AutoImageProcessor.from_pretrained("MariaK/food_classifier") >>> inputs = image_processor(image, return_tensors="tf") ``` 入力をモデルに渡し、ロジットを返します。 ```py >>> from transformers import TFAutoModelForImageClassification >>> model = TFAutoModelForImageClassification.from_pretrained("MariaK/food_classifier") >>> logits = model(**inputs).logits ``` 最も高い確率で予測されたラベルを取得し、モデルの `id2label` マッピングを使用してラベルに変換します。 ```py >>> predicted_class_id = int(tf.math.argmax(logits, axis=-1)[0]) >>> model.config.id2label[predicted_class_id] 'beignets' ``` </tf> </frameworkcontent>
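上の TensorFlow の例に続けて、`pipeline` の出力のように上位クラスのスコアも確認したい場合は、たとえば次のようにロジットを確率に変換できます(表示するクラス数 `k=5` は一例です)。

```py
>>> import tensorflow as tf

>>> probs = tf.nn.softmax(logits, axis=-1)[0]
>>> top5 = tf.math.top_k(probs, k=5)
>>> # map each class id back to its label, similar to the `pipeline` output
>>> [(model.config.id2label[int(i)], float(p)) for i, p in zip(top5.indices, top5.values)]
```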
transformers/docs/source/ja/tasks/image_classification.md/0
{ "file_path": "transformers/docs/source/ja/tasks/image_classification.md", "repo_id": "transformers", "token_count": 8613 }
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Video classification [[open-in-colab]] ビデオ分類は、ビデオ全体にラベルまたはクラスを割り当てるタスクです。ビデオには、各ビデオに 1 つのクラスのみが含まれることが期待されます。ビデオ分類モデルはビデオを入力として受け取り、ビデオがどのクラスに属するかについての予測を返します。これらのモデルを使用して、ビデオの内容を分類できます。ビデオ分類の実際のアプリケーションはアクション/アクティビティ認識であり、フィットネス アプリケーションに役立ちます。また、視覚障害のある人にとって、特に通勤時に役立ちます。 このガイドでは、次の方法を説明します。 1. [UCF101](https://www.crcv.ucf.edu/) のサブセットで [VideoMAE](https://huggingface.co/docs/transformers/main/en/model_doc/videomae) を微調整します。 data/UCF101.php) データセット。 2. 微調整したモデルを推論に使用します。 <Tip> このタスクと互換性のあるすべてのアーキテクチャとチェックポイントを確認するには、[タスクページ](https://huggingface.co/tasks/video-classification) を確認することをお勧めします。 </Tip> 始める前に、必要なライブラリがすべてインストールされていることを確認してください。 ```bash pip install -q pytorchvideo transformers evaluate ``` [PyTorchVideo](https://pytorchvideo.org/) (`pytorchvideo` と呼ばれます) を使用してビデオを処理し、準備します。 モデルをアップロードしてコミュニティと共有できるように、Hugging Face アカウントにログインすることをお勧めします。プロンプトが表示されたら、トークンを入力してログインします。 ```py >>> from huggingface_hub import notebook_login >>> notebook_login() ``` ## Load UCF101 dataset まず、[UCF-101 データセット](https://www.crcv.ucf.edu/data/UCF101.php) のサブセットをロードします。これにより、完全なデータセットのトレーニングにさらに時間を費やす前に、実験してすべてが機能することを確認する機会が得られます。 ```py >>> from huggingface_hub import hf_hub_download >>> hf_dataset_identifier = "sayakpaul/ucf101-subset" >>> filename = "UCF101_subset.tar.gz" >>> file_path = hf_hub_download(repo_id=hf_dataset_identifier, filename=filename, repo_type="dataset") ``` サブセットをダウンロードした後、圧縮アーカイブを抽出する必要があります。 ```py >>> import tarfile >>> with tarfile.open(file_path) as t: ... t.extractall(".") ``` 大まかに言うと、データセットは次のように構成されています。 ```bash UCF101_subset/ train/ BandMarching/ video_1.mp4 video_2.mp4 ... Archery video_1.mp4 video_2.mp4 ... ... val/ BandMarching/ video_1.mp4 video_2.mp4 ... Archery video_1.mp4 video_2.mp4 ... ... test/ BandMarching/ video_1.mp4 video_2.mp4 ... Archery video_1.mp4 video_2.mp4 ... ... ``` (`sorted`)された ビデオ パスは次のように表示されます。 ```bash ... 'UCF101_subset/train/ApplyEyeMakeup/v_ApplyEyeMakeup_g07_c04.avi', 'UCF101_subset/train/ApplyEyeMakeup/v_ApplyEyeMakeup_g07_c06.avi', 'UCF101_subset/train/ApplyEyeMakeup/v_ApplyEyeMakeup_g08_c01.avi', 'UCF101_subset/train/ApplyEyeMakeup/v_ApplyEyeMakeup_g09_c02.avi', 'UCF101_subset/train/ApplyEyeMakeup/v_ApplyEyeMakeup_g09_c06.avi' ... 
``` 同じグループ/シーンに属するビデオ クリップがあり、ビデオ ファイル パスではグループが`g`で示されていることがわかります。たとえば、`v_ApplyEyeMakeup_g07_c04.avi`や`v_ApplyEyeMakeup_g07_c06.avi`などです。 検証と評価の分割では、[データ漏洩](https://www.kaggle.com/code/alexisbcook/data-leakage) を防ぐために、同じグループ/シーンからのビデオ クリップを使用しないでください。このチュートリアルで使用しているサブセットでは、この情報が考慮されています。 次に、データセット内に存在するラベルのセットを取得します。また、モデルを初期化するときに役立つ 2 つの辞書を作成します。 * `label2id`: クラス名を整数にマップします。 * `id2label`: 整数をクラス名にマッピングします。 ```py >>> class_labels = sorted({str(path).split("/")[2] for path in all_video_file_paths}) >>> label2id = {label: i for i, label in enumerate(class_labels)} >>> id2label = {i: label for label, i in label2id.items()} >>> print(f"Unique classes: {list(label2id.keys())}.") # Unique classes: ['ApplyEyeMakeup', 'ApplyLipstick', 'Archery', 'BabyCrawling', 'BalanceBeam', 'BandMarching', 'BaseballPitch', 'Basketball', 'BasketballDunk', 'BenchPress']. ``` 個性的なクラスが10種類あります。トレーニング セットには、クラスごとに 30 個のビデオがあります。 ## Load a model to fine-tune 事前トレーニングされたチェックポイントとそれに関連する画像プロセッサからビデオ分類モデルをインスタンス化します。モデルのエンコーダーには事前トレーニングされたパラメーターが付属しており、分類ヘッドはランダムに初期化されます。画像プロセッサは、データセットの前処理パイプラインを作成するときに役立ちます。 ```py >>> from transformers import VideoMAEImageProcessor, VideoMAEForVideoClassification >>> model_ckpt = "MCG-NJU/videomae-base" >>> image_processor = VideoMAEImageProcessor.from_pretrained(model_ckpt) >>> model = VideoMAEForVideoClassification.from_pretrained( ... model_ckpt, ... label2id=label2id, ... id2label=id2label, ... ignore_mismatched_sizes=True, # provide this in case you're planning to fine-tune an already fine-tuned checkpoint ... ) ``` モデルのロード中に、次の警告が表示される場合があります。 ```bash Some weights of the model checkpoint at MCG-NJU/videomae-base were not used when initializing VideoMAEForVideoClassification: [..., 'decoder.decoder_layers.1.attention.output.dense.bias', 'decoder.decoder_layers.2.attention.attention.key.weight'] - This IS expected if you are initializing VideoMAEForVideoClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model). - This IS NOT expected if you are initializing VideoMAEForVideoClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model). Some weights of VideoMAEForVideoClassification were not initialized from the model checkpoint at MCG-NJU/videomae-base and are newly initialized: ['classifier.bias', 'classifier.weight'] You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. ``` この警告は、一部の重み (たとえば、`classifier`層の重みとバイアス) を破棄し、他のいくつかの重み (新しい`classifier`層の重みとバイアス) をランダムに初期化していることを示しています。この場合、これは予想されることです。事前にトレーニングされた重みを持たない新しい頭部を追加しているため、推論に使用する前にこのモデルを微調整する必要があるとライブラリが警告します。これはまさに私たちが行おうとしているものです。する。 **注意** [このチェックポイント](https://huggingface.co/MCG-NJU/videomae-base-finetuned-kinetics) は、同様のダウンストリームで微調整されてチェックポイントが取得されたため、このタスクのパフォーマンスが向上することに注意してください。かなりのドメインの重複があるタスク。 `MCG-NJU/videomae-base-finetuned-kinetics` を微調整して取得した [このチェックポイント](https://huggingface.co/sayakpaul/videomae-base-finetuned-kinetics-finetuned-ucf101-subset) を確認できます。 -キネティクス`。 ## Prepare the datasets for training ビデオの前処理には、[PyTorchVideo ライブラリ](https://pytorchvideo.org/) を利用します。まず、必要な依存関係をインポートします。 ```py >>> import pytorchvideo.data >>> from pytorchvideo.transforms import ( ... ApplyTransformToKey, ... Normalize, ... RandomShortSideScale, ... RemoveKey, ... ShortSideScale, ... UniformTemporalSubsample, ... 
) >>> from torchvision.transforms import ( ... Compose, ... Lambda, ... RandomCrop, ... RandomHorizontalFlip, ... Resize, ... ) ``` トレーニング データセットの変換には、均一な時間サブサンプリング、ピクセル正規化、ランダム クロッピング、およびランダムな水平反転を組み合わせて使用​​します。検証および評価のデータセット変換では、ランダムなトリミングと水平反転を除き、同じ変換チェーンを維持します。これらの変換の詳細については、[PyTorchVideo の公式ドキュメント](https://pytorchvideo.org) を参照してください。 事前トレーニングされたモデルに関連付けられた`image_processor`を使用して、次の情報を取得します。 * ビデオ フレームのピクセルが正規化される画像の平均値と標準偏差。 * ビデオ フレームのサイズが変更される空間解像度。 まず、いくつかの定数を定義します。 ```py >>> mean = image_processor.image_mean >>> std = image_processor.image_std >>> if "shortest_edge" in image_processor.size: ... height = width = image_processor.size["shortest_edge"] >>> else: ... height = image_processor.size["height"] ... width = image_processor.size["width"] >>> resize_to = (height, width) >>> num_frames_to_sample = model.config.num_frames >>> sample_rate = 4 >>> fps = 30 >>> clip_duration = num_frames_to_sample * sample_rate / fps ``` 次に、データセット固有の変換とデータセットをそれぞれ定義します。トレーニングセットから始めます: ```py >>> train_transform = Compose( ... [ ... ApplyTransformToKey( ... key="video", ... transform=Compose( ... [ ... UniformTemporalSubsample(num_frames_to_sample), ... Lambda(lambda x: x / 255.0), ... Normalize(mean, std), ... RandomShortSideScale(min_size=256, max_size=320), ... RandomCrop(resize_to), ... RandomHorizontalFlip(p=0.5), ... ] ... ), ... ), ... ] ... ) >>> train_dataset = pytorchvideo.data.Ucf101( ... data_path=os.path.join(dataset_root_path, "train"), ... clip_sampler=pytorchvideo.data.make_clip_sampler("random", clip_duration), ... decode_audio=False, ... transform=train_transform, ... ) ``` 同じ一連のワークフローを検証セットと評価セットに適用できます。 ```py >>> val_transform = Compose( ... [ ... ApplyTransformToKey( ... key="video", ... transform=Compose( ... [ ... UniformTemporalSubsample(num_frames_to_sample), ... Lambda(lambda x: x / 255.0), ... Normalize(mean, std), ... Resize(resize_to), ... ] ... ), ... ), ... ] ... ) >>> val_dataset = pytorchvideo.data.Ucf101( ... data_path=os.path.join(dataset_root_path, "val"), ... clip_sampler=pytorchvideo.data.make_clip_sampler("uniform", clip_duration), ... decode_audio=False, ... transform=val_transform, ... ) >>> test_dataset = pytorchvideo.data.Ucf101( ... data_path=os.path.join(dataset_root_path, "test"), ... clip_sampler=pytorchvideo.data.make_clip_sampler("uniform", clip_duration), ... decode_audio=False, ... transform=val_transform, ... ) ``` **注意**: 上記のデータセット パイプラインは、[公式 PyTorchVideo サンプル](https://pytorchvideo.org/docs/tutorial_classification#dataset) から取得したものです。 [`pytorchvideo.data.Ucf101()`](https://pytorchvideo.readthedocs.io/en/latest/api/data/data.html#pytorchvideo.data.Ucf101) 関数を使用しています。 UCF-101 データセット。内部では、[`pytorchvideo.data.labeled_video_dataset.LabeledVideoDataset`](https://pytorchvideo.readthedocs.io/en/latest/api/data/data.html#pytorchvideo.data.LabeledVideoDataset) オブジェクトを返します。 `LabeledVideoDataset` クラスは、PyTorchVideo データセット内のすべてのビデオの基本クラスです。したがって、PyTorchVideo で既製でサポートされていないカスタム データセットを使用したい場合は、それに応じて `LabeledVideoDataset` クラスを拡張できます。詳細については、`data`API [ドキュメント](https://pytorchvideo.readthedocs.io/en/latest/api/data/data.html)を参照してください。また、データセットが同様の構造 (上に示したもの) に従っている場合は、`pytorchvideo.data.Ucf101()` を使用すると問題なく動作するはずです。 `num_videos` 引数にアクセスすると、データセット内のビデオの数を知ることができます。 ```py >>> print(train_dataset.num_videos, val_dataset.num_videos, test_dataset.num_videos) # (300, 30, 75) ``` ## Visualize the preprocessed video for better debugging ```py >>> import imageio >>> import numpy as np >>> from IPython.display import Image >>> def unnormalize_img(img): ... 
"""Un-normalizes the image pixels.""" ... img = (img * std) + mean ... img = (img * 255).astype("uint8") ... return img.clip(0, 255) >>> def create_gif(video_tensor, filename="sample.gif"): ... """Prepares a GIF from a video tensor. ... ... The video tensor is expected to have the following shape: ... (num_frames, num_channels, height, width). ... """ ... frames = [] ... for video_frame in video_tensor: ... frame_unnormalized = unnormalize_img(video_frame.permute(1, 2, 0).numpy()) ... frames.append(frame_unnormalized) ... kargs = {"duration": 0.25} ... imageio.mimsave(filename, frames, "GIF", **kargs) ... return filename >>> def display_gif(video_tensor, gif_name="sample.gif"): ... """Prepares and displays a GIF from a video tensor.""" ... video_tensor = video_tensor.permute(1, 0, 2, 3) ... gif_filename = create_gif(video_tensor, gif_name) ... return Image(filename=gif_filename) >>> sample_video = next(iter(train_dataset)) >>> video_tensor = sample_video["video"] >>> display_gif(video_tensor) ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/sample_gif.gif" alt="Person playing basketball"/> </div> ## Train the model 🤗 Transformers の [`Trainer`](https://huggingface.co/docs/transformers/main_classes/trainer) をモデルのトレーニングに利用します。 `Trainer`をインスタンス化するには、トレーニング構成と評価メトリクスを定義する必要があります。最も重要なのは [`TrainingArguments`](https://huggingface.co/transformers/main_classes/trainer.html#transformers.TrainingArguments) で、これはトレーニングを構成するためのすべての属性を含むクラスです。モデルのチェックポイントを保存するために使用される出力フォルダー名が必要です。また、🤗 Hub 上のモデル リポジトリ内のすべての情報を同期するのにも役立ちます。 トレーニング引数のほとんどは一目瞭然ですが、ここで非常に重要なのは`remove_unused_columns=False`です。これにより、モデルの呼び出し関数で使用されない機能が削除されます。デフォルトでは`True`です。これは、通常、未使用の特徴列を削除し、モデルの呼び出し関数への入力を解凍しやすくすることが理想的であるためです。ただし、この場合、`pixel_values` (モデルが入力で期待する必須キーです) を作成するには、未使用の機能 (特に`video`) が必要です。 ```py >>> from transformers import TrainingArguments, Trainer >>> model_name = model_ckpt.split("/")[-1] >>> new_model_name = f"{model_name}-finetuned-ucf101-subset" >>> num_epochs = 4 >>> args = TrainingArguments( ... new_model_name, ... remove_unused_columns=False, ... eval_strategy="epoch", ... save_strategy="epoch", ... learning_rate=5e-5, ... per_device_train_batch_size=batch_size, ... per_device_eval_batch_size=batch_size, ... warmup_ratio=0.1, ... logging_steps=10, ... load_best_model_at_end=True, ... metric_for_best_model="accuracy", ... push_to_hub=True, ... max_steps=(train_dataset.num_videos // batch_size) * num_epochs, ... ) ``` `pytorchvideo.data.Ucf101()` によって返されるデータセットは `__len__` メソッドを実装していません。そのため、`TrainingArguments`をインスタンス化するときに`max_steps`を定義する必要があります。 次に、予測からメトリクスを計算する関数を定義する必要があります。これは、これからロードする`metric`を使用します。必要な前処理は、予測されたロジットの argmax を取得することだけです。 ```py import evaluate metric = evaluate.load("accuracy") def compute_metrics(eval_pred): predictions = np.argmax(eval_pred.predictions, axis=1) return metric.compute(predictions=predictions, references=eval_pred.label_ids) ``` **評価に関する注意事項**: [VideoMAE 論文](https://arxiv.org/abs/2203.12602) では、著者は次の評価戦略を使用しています。彼らはテスト ビデオからのいくつかのクリップでモデルを評価し、それらのクリップにさまざまなクロップを適用して、合計スコアを報告します。ただし、単純さと簡潔さを保つために、このチュートリアルではそれを考慮しません。 また、サンプルをまとめてバッチ処理するために使用される `collat​​e_fn` を定義します。各バッチは、`pixel_values` と `labels` という 2 つのキーで構成されます。 ```py >>> def collate_fn(examples): ... # permute to (num_frames, num_channels, height, width) ... pixel_values = torch.stack( ... [example["video"].permute(1, 0, 2, 3) for example in examples] ... ) ... labels = torch.tensor([example["label"] for example in examples]) ... 
return {"pixel_values": pixel_values, "labels": labels} ``` 次に、これらすべてをデータセットとともに`Trainer`に渡すだけです。 ```py >>> trainer = Trainer( ... model, ... args, ... train_dataset=train_dataset, ... eval_dataset=val_dataset, ... processing_class=image_processor, ... compute_metrics=compute_metrics, ... data_collator=collate_fn, ... ) ``` すでにデータを前処理しているのに、なぜトークナイザーとして`image_processor`を渡したのか不思議に思うかもしれません。これは、イメージ プロセッサ構成ファイル (JSON として保存) もハブ上のリポジトリにアップロードされるようにするためだけです。 次に、`train` メソッドを呼び出してモデルを微調整します。 ```py >>> train_results = trainer.train() ``` トレーニングが完了したら、 [`~transformers.Trainer.push_to_hub`] メソッドを使用してモデルをハブに共有し、誰もがモデルを使用できるようにします。 ```py >>> trainer.push_to_hub() ``` ## Inference モデルを微調整したので、それを推論に使用できるようになりました。 推論のためにビデオをロードします。 ```py >>> sample_test_video = next(iter(test_dataset)) ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/sample_gif_two.gif" alt="Teams playing basketball"/> </div> 推論用に微調整されたモデルを試す最も簡単な方法は、それを [`pipeline`](https://huggingface.co/docs/transformers/main/en/main_classes/pipelines#transformers.VideoClassificationPipeline). で使用することです。モデルを使用してビデオ分類用の` pipeline`をインスタンス化し、それにビデオを渡します。 ```py >>> from transformers import pipeline >>> video_cls = pipeline(model="my_awesome_video_cls_model") >>> video_cls("https://huggingface.co/datasets/sayakpaul/ucf101-subset/resolve/main/v_BasketballDunk_g14_c06.avi") [{'score': 0.9272987842559814, 'label': 'BasketballDunk'}, {'score': 0.017777055501937866, 'label': 'BabyCrawling'}, {'score': 0.01663011871278286, 'label': 'BalanceBeam'}, {'score': 0.009560945443809032, 'label': 'BandMarching'}, {'score': 0.0068979403004050255, 'label': 'BaseballPitch'}] ``` 必要に応じて、`pipeline`の結果を手動で複製することもできます。 ```py >>> def run_inference(model, video): ... # (num_frames, num_channels, height, width) ... perumuted_sample_test_video = video.permute(1, 0, 2, 3) ... inputs = { ... "pixel_values": perumuted_sample_test_video.unsqueeze(0), ... "labels": torch.tensor( ... [sample_test_video["label"]] ... ), # this can be skipped if you don't have labels available. ... } ... device = torch.device("cuda" if torch.cuda.is_available() else "cpu") ... inputs = {k: v.to(device) for k, v in inputs.items()} ... model = model.to(device) ... # forward pass ... with torch.no_grad(): ... outputs = model(**inputs) ... logits = outputs.logits ... return logits ``` 次に、入力をモデルに渡し、`logits `を返します。 ```py >>> logits = run_inference(trained_model, sample_test_video["video"]) ``` `logits` をデコードすると、次のようになります。 ```py >>> predicted_class_idx = logits.argmax(-1).item() >>> print("Predicted class:", model.config.id2label[predicted_class_idx]) # Predicted class: BasketballDunk ```
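`pipeline` の出力のように上位クラスのスコアも確認したい場合は、たとえば次のようにロジットを確率に変換できます(表示するクラス数 `5` は一例です)。

```py
>>> probs = logits.softmax(dim=-1)[0]
>>> top5 = probs.topk(5)
>>> # map each class id back to its label, similar to the `pipeline` output above
>>> [(model.config.id2label[idx.item()], round(score.item(), 4)) for score, idx in zip(top5.values, top5.indices)]
```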
transformers/docs/source/ja/tasks/video_classification.md/0
{ "file_path": "transformers/docs/source/ja/tasks/video_classification.md", "repo_id": "transformers", "token_count": 10027 }
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Hugging Face Transformers를 추가하는 방법은 무엇인가요? [[how-to-add-a-model-to-transformers]] Hugging Face Transformers 라이브러리는 커뮤니티 기여자들 덕분에 새로운 모델을 제공할 수 있는 경우가 많습니다. 하지만 이는 도전적인 프로젝트이며 Hugging Face Transformers 라이브러리와 구현할 모델에 대한 깊은 이해가 필요합니다. Hugging Face에서는 더 많은 커뮤니티 멤버가 모델을 적극적으로 추가할 수 있도록 지원하고자 하며, 이 가이드를 통해 PyTorch 모델을 추가하는 과정을 안내하고 있습니다 (PyTorch가 설치되어 있는지 확인해주세요). 이 과정을 진행하면 다음과 같은 내용을 이해하게 됩니다: - 오픈 소스의 모범 사례에 대한 통찰력을 얻습니다. - 가장 인기 있는 딥러닝 라이브러리의 설계 원칙을 이해합니다. - 대규모 모델을 효율적으로 테스트하는 방법을 배웁니다. - `black`, `ruff`, `make fix-copies`와 같은 Python 유틸리티를 통합하여 깔끔하고 가독성 있는 코드를 작성하는 방법을 배웁니다. Hugging Face 팀은 항상 도움을 줄 준비가 되어 있으므로 혼자가 아니라는 점을 기억하세요. 🤗 ❤️ 시작에 앞서 🤗 Transformers에 원하는 모델을 추가하기 위해 [New model addition](https://github.com/huggingface/transformers/issues/new?assignees=&labels=New+model&template=new-model-addition.yml) 이슈를 열어야 합니다. 특정 모델을 기여하는 데 특별히 까다로운 기준을 가지지 않는 경우 [New model label](https://github.com/huggingface/transformers/labels/New%20model)을 필터링하여 요청되지 않은 모델이 있는지 확인하고 작업할 수 있습니다. 새로운 모델 요청을 열었다면 첫 번째 단계는 🤗 Transformers에 익숙해지는 것입니다! ## 🤗 Transformers의 전반적인 개요 [[general-overview-of-transformers]] 먼저 🤗 Transformers에 대한 전반적인 개요를 파악해야 합니다. 🤗 Transformers는 매우 주관적인 라이브러리이기 때문에 해당 라이브러리의 철학이나 설계 선택 사항에 동의하지 않을 수도 있습니다. 그러나 우리의 경험상 라이브러리의 기본적인 설계 선택과 철학은 🤗 Transformers의 규모를 효율적으로 확장하면서 유지 보수 비용을 합리적인 수준으로 유지하는 것입니다. [라이브러리의 철학에 대한 문서](philosophy)를 읽는 것이 라이브러리를 더 잘 이해하는 좋은 시작점입니다. 모든 모델에 적용하려는 몇 가지 작업 방식에 대한 선택 사항이 있습니다: - 일반적으로 추상화보다는 구성을 선호합니다. - 코드를 복제하는 것이 항상 나쁜 것은 아닙니다. 코드의 가독성이나 접근성을 크게 향상시킨다면 복제하는 것은 좋습니다. - 모델 파일은 가능한 한 독립적으로 유지되어야 합니다. 따라서 특정 모델의 코드를 읽을 때 해당 `modeling_....py` 파일만 확인하면 됩니다. 우리는 라이브러리의 코드가 제품을 제공하는 수단뿐만 아니라 개선하고자 하는 제품이라고도 생각합니다. 따라서 모델을 추가할 때, 사용자는 모델을 사용할 사람뿐만 아니라 코드를 읽고 이해하고 필요한 경우 조정할 수 있는 모든 사람까지도 포함한다는 점을 기억해야 합니다. 이를 염두에 두고 일반적인 라이브러리 설계에 대해 조금 더 자세히 알아보겠습니다. ### 모델 개요 [[overview-of-models]] 모델을 성공적으로 추가하려면 모델과 해당 구성인 [`PreTrainedModel`] 및 [`PretrainedConfig`] 간의 상호작용을 이해하는 것이 중요합니다. 예를 들어, 🤗 Transformers에 추가하려는 모델을 `BrandNewBert`라고 부르겠습니다. 다음을 살펴보겠습니다: <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers_overview.png"/> 보다시피, 🤗 Transformers에서는 상속을 사용하지만 추상화 수준을 최소한으로 유지합니다. 라이브러리의 어떤 모델에서도 두 수준 이상의 추상화가 존재하지 않습니다. `BrandNewBertModel`은 `BrandNewBertPreTrainedModel`에서 상속받고, 이 클래스는 [`PreTrainedModel`]에서 상속받습니다. 이로써 새로운 모델은 [`PreTrainedModel`]에만 의존하도록 하려고 합니다. 모든 새로운 모델에 자동으로 제공되는 중요한 기능은 [`~PreTrainedModel.from_pretrained`] 및 [`~PreTrainedModel.save_pretrained`]입니다. 이러한 기능 외에도 `BrandNewBertModel.forward`와 같은 다른 중요한 기능은 새로운 `modeling_brand_new_bert.py` 스크립트에서 완전히 정의되어야 합니다. 또한 `BrandNewBertForMaskedLM`과 같은 특정 헤드 레이어를 가진 모델은 `BrandNewBertModel`을 상속받지 않고 forward pass에서 호출할 수 있는 `BrandNewBertModel`을 사용하여 추상화 수준을 낮게 유지합니다. 모든 새로운 모델은 `BrandNewBertConfig`라는 구성 클래스를 필요로 합니다. 
이 구성은 항상 [`PreTrainedModel`]의 속성으로 저장되며, 따라서 `BrandNewBertPreTrainedModel`을 상속받는 모든 클래스에서 `config` 속성을 통해 액세스할 수 있습니다: ```python model = BrandNewBertModel.from_pretrained("brandy/brand_new_bert") model.config # model has access to its config ``` 모델과 마찬가지로 구성은 [`PretrainedConfig`]에서 기본 직렬화 및 역직렬화 기능을 상속받습니다. 구성과 모델은 항상 *pytorch_model.bin* 파일과 *config.json* 파일로 각각 별도로 직렬화됩니다. [`~PreTrainedModel.save_pretrained`]를 호출하면 자동으로 [`~PretrainedConfig.save_pretrained`]도 호출되므로 모델과 구성이 모두 저장됩니다. ### 코드 스타일 [[code-style]] 새로운 모델을 작성할 때, Transformers는 주관적인 라이브러리이며 몇 가지 독특한 코딩 스타일이 있습니다: 1. 모델의 forward pass는 모델 파일에 완전히 작성되어야 합니다. 라이브러리의 다른 모델에서 블록을 재사용하려면 코드를 복사하여 위에 `# Copied from` 주석과 함께 붙여넣으면 됩니다 (예: [여기](https://github.com/huggingface/transformers/blob/v4.17.0/src/transformers/models/roberta/modeling_roberta.py#L160)를 참조하세요). 2. 코드는 완전히 이해하기 쉬워야 합니다. 변수 이름을 명확하게 지정하고 약어를 사용하지 않는 것이 좋습니다. 예를 들어, `act`보다는 `activation`을 선호합니다. 한 글자 변수 이름은 루프의 인덱스인 경우를 제외하고 권장되지 않습니다. 3. 더 일반적으로, 짧은 마법 같은 코드보다는 길고 명시적인 코드를 선호합니다. 4. PyTorch에서 `nn.Sequential`을 하위 클래스로 만들지 말고 `nn.Module`을 하위 클래스로 만들고 forward pass를 작성하여 다른 사람이 코드를 빠르게 디버그할 수 있도록 합니다. print 문이나 중단점을 추가할 수 있습니다. 5. 함수 시그니처에는 타입 주석을 사용해야 합니다. 그 외에는 타입 주석보다 변수 이름이 훨씬 읽기 쉽고 이해하기 쉽습니다. ### 토크나이저 개요 [[overview-of-tokenizers]] 아직 준비되지 않았습니다 :-( 이 섹션은 곧 추가될 예정입니다! ## 🤗 Transformers에 모델 추가하는 단계별 방법 [[stepbystep-recipe-to-add-a-model-to-transformers]] 각자 모델을 이식하는 방법에 대한 선호가 다르기 때문에 다른 기여자들이 Hugging Face에 모델을 이식하는 방법에 대한 요약을 살펴보는 것이 매우 유용할 수 있습니다. 다음은 모델을 이식하는 방법에 대한 커뮤니티 블로그 게시물 목록입니다: 1. [GPT2 모델 이식하기](https://medium.com/huggingface/from-tensorflow-to-pytorch-265f40ef2a28) - [Thomas](https://huggingface.co/thomwolf) 2. [WMT19 MT 모델 이식하기](https://huggingface.co/blog/porting-fsmt) - [Stas](https://huggingface.co/stas) 경험상 모델을 추가할 때 주의해야 할 가장 중요한 사항은 다음과 같습니다: - 같은 일을 반복하지 마세요! 새로운 🤗 Transformers 모델을 위해 추가할 코드의 대부분은 이미 🤗 Transformers 어딘가에 존재합니다. 이미 존재하는 복사할 수 있는 유사한 모델과 토크나이저를 찾는데 시간을 투자하세요. [grep](https://www.gnu.org/software/grep/)와 [rg](https://github.com/BurntSushi/ripgrep)를 참고하세요. 모델의 토크나이저가 한 모델을 기반으로 하고 모델링 코드가 다른 모델을 기반으로 하는 경우가 존재할 수도 있습니다. 예를 들어 FSMT의 모델링 코드는 BART를 기반으로 하고 FSMT의 토크나이저 코드는 XLM을 기반으로 합니다. - 이것은 과학적인 도전보다는 공학적인 도전입니다. 논문의 모델의 모든 이론적 측면을 이해하려는 것보다 효율적인 디버깅 환경을 만드는 데 더 많은 시간을 소비해야 합니다. - 막힐 때 도움을 요청하세요! 모델은 🤗 Transformers의 핵심 구성 요소이므로 Hugging Face의 우리는 당신이 모델을 추가하는 각 단계에서 기꺼이 도움을 줄 준비가 되어 있습니다. 진전이 없다고 느끼면 주저하지 말고 도움을 요청하세요. 다음에서는 모델을 🤗 Transformers로 이식하는 데 가장 유용한 일반적인 절차를 제공하려고 노력합니다. 다음 목록은 모델을 추가하는 데 수행해야 할 모든 작업의 요약이며 To-Do 목록으로 사용할 수 있습니다: ☐ (선택 사항) BrandNewBert의 이론적 측면 이해<br> ☐ Hugging Face 개발 환경 준비<br> ☐ 원본 리포지토리의 디버깅 환경 설정<br> ☐ 원본 리포지토리와 체크포인트를 사용하여 `forward()` pass가 성공적으로 실행되는 스크립트 작성<br> ☐ 🤗 Transformers에 모델 스켈레톤 성공적으로 추가<br> ☐ 원본 체크포인트를 🤗 Transformers 체크포인트로 성공적으로 변환<br> ☐ 🤗 Transformers에서 원본 체크포인트와 동일한 출력을 내주는 `forward()` pass 성공적으로 실행<br> ☐ 🤗 Transformers에서 모델 테스트 완료<br> ☐ 🤗 Transformers에 토크나이저 성공적으로 추가<br> ☐ 종단 간 통합 테스트 실행<br> ☐ 문서 작성 완료<br> ☐ 모델 가중치를 허브에 업로드<br> ☐ Pull request 제출<br> ☐ (선택 사항) 데모 노트북 추가 우선, 일반적으로는 `BrandNewBert`의 이론적인 이해로 시작하는 것을 권장합니다. 그러나 이론적 측면을 직접 이해하는 대신 *직접 해보면서* 모델의 이론적 측면을 이해하는 것을 선호하는 경우 바로 `BrandNewBert` 코드 베이스로 빠져드는 것도 괜찮습니다. 이 옵션은 엔지니어링 기술이 이론적 기술보다 더 뛰어난 경우, `BrandNewBert`의 논문을 이해하는 데 어려움이 있는 경우, 또는 과학적인 논문을 읽는 것보다 프로그래밍에 훨씬 더 흥미 있는 경우에 더 적합할 수 있습니다. ### 1. (선택 사항) BrandNewBert의 이론적 측면 [[1-optional-theoretical-aspects-of-brandnewbert]] 만약 그런 서술적인 작업이 존재한다면, *BrandNewBert*의 논문을 읽어보는 시간을 가져야 합니다. 이해하기 어려운 섹션이 많을 수 있습니다. 그렇더라도 걱정하지 마세요! 
목표는 논문의 깊은 이론적 이해가 아니라 *BrandNewBert*를 🤗 Transformers에서 효과적으로 재구현하기 위해 필요한 정보를 추출하는 것입니다. 이를 위해 이론적 측면에 너무 많은 시간을 투자할 필요는 없지만 다음과 같은 실제적인 측면에 집중해야 합니다: - *BrandNewBert*는 어떤 유형의 모델인가요? BERT와 유사한 인코더 모델인가요? GPT2와 유사한 디코더 모델인가요? BART와 유사한 인코더-디코더 모델인가요? 이들 간의 차이점에 익숙하지 않은 경우[model_summary](model_summary)를 참조하세요. - *BrandNewBert*의 응용 분야는 무엇인가요? 텍스트 분류인가요? 텍스트 생성인가요? 요약과 같은 Seq2Seq 작업인가요? - *brand_new_bert*와 BERT/GPT-2/BART의 차이점은 무엇인가요? - *brand_new_bert*와 가장 유사한 [🤗 Transformers 모델](https://huggingface.co/transformers/#contents)은 무엇인가요? - 어떤 종류의 토크나이저가 사용되나요? Sentencepiece 토크나이저인가요? Word piece 토크나이저인가요? BERT 또는 BART에 사용되는 동일한 토크나이저인가요? 모델의 아키텍처에 대해 충분히 이해했다는 생각이 든 후, 궁금한 사항이 있으면 Hugging Face 팀에 문의하십시오. 이는 모델의 아키텍처, 어텐션 레이어 등에 관한 질문을 포함할 수 있습니다. Hugging Face의 유지 관리자들은 보통 코드를 검토하는 것에 대해 매우 기뻐하므로 당신을 돕는 일을 매우 환영할 것입니다! ### 2. 개발 환경 설정 [[2-next-prepare-your-environment]] 1. 저장소 페이지에서 "Fork" 버튼을 클릭하여 저장소의 사본을 GitHub 사용자 계정으로 만듭니다. 2. `transformers` fork를 로컬 디스크에 클론하고 베이스 저장소를 원격 저장소로 추가합니다: ```bash git clone https://github.com/[your Github handle]/transformers.git cd transformers git remote add upstream https://github.com/huggingface/transformers.git ``` 3. 개발 환경을 설정합니다. 다음 명령을 실행하여 개발 환경을 설정할 수 있습니다: ```bash python -m venv .env source .env/bin/activate pip install -e ".[dev]" ``` 각 운영 체제에 따라 Transformers의 선택적 의존성이 개수가 증가하면 이 명령이 실패할 수 있습니다. 그런 경우에는 작업 중인 딥 러닝 프레임워크 (PyTorch, TensorFlow 및/또는 Flax)을 설치한 후, 다음 명령을 수행하면 됩니다: ```bash pip install -e ".[quality]" ``` 대부분의 경우에는 이것으로 충분합니다. 그런 다음 상위 디렉토리로 돌아갑니다. ```bash cd .. ``` 4. Transformers에 *brand_new_bert*의 PyTorch 버전을 추가하는 것을 권장합니다. PyTorch를 설치하려면 다음 링크의 지침을 따르십시오: https://pytorch.org/get-started/locally/. **참고:** CUDA를 설치할 필요는 없습니다. 새로운 모델이 CPU에서 작동하도록 만드는 것으로 충분합니다. 5. *brand_new_bert*를 이식하기 위해서는 해당 원본 저장소에 접근할 수 있어야 합니다: ```bash git clone https://github.com/org_that_created_brand_new_bert_org/brand_new_bert.git cd brand_new_bert pip install -e . ``` 이제 *brand_new_bert*를 🤗 Transformers로 이식하기 위한 개발 환경을 설정하였습니다. ### 3.-4. 원본 저장소에서 사전 훈련된 체크포인트 실행하기 [[3.-4.-run-a-pretrained-checkpoint-using-the-original-repository]] 먼저, 원본 *brand_new_bert* 저장소에서 작업을 시작합니다. 원본 구현은 보통 "연구용"으로 많이 사용됩니다. 즉, 문서화가 부족하고 코드가 이해하기 어려울 수 있습니다. 그러나 이것이 바로 *brand_new_bert*를 다시 구현하려는 동기가 되어야 합니다. Hugging Face에서의 주요 목표 중 하나는 **거인의 어깨 위에 서는 것**이며, 이는 여기에서 쉽게 해석되어 동작하는 모델을 가져와서 가능한 한 **접근 가능하고 사용자 친화적이며 아름답게** 만드는 것입니다. 이것은 🤗 Transformers에서 모델을 다시 구현하는 가장 중요한 동기입니다 - 새로운 복잡한 NLP 기술을 **모두에게** 접근 가능하게 만드는 것을 목표로 합니다. 따라서 원본 저장소에 대해 자세히 살펴보는 것으로 시작해야 합니다. 원본 저장소에서 공식 사전 훈련된 모델을 성공적으로 실행하는 것은 종종 **가장 어려운** 단계입니다. 우리의 경험에 따르면, 원본 코드 베이스에 익숙해지는 데 시간을 투자하는 것이 매우 중요합니다. 다음을 파악해야 합니다: - 사전 훈련된 가중치를 어디서 찾을 수 있는지? - 사전 훈련된 가중치를 해당 모델에로드하는 방법은? - 모델과 독립적으로 토크나이저를 실행하는 방법은? - 간단한 forward pass에 필요한 클래스와 함수를 파악하기 위해 forward pass를 한 번 추적해 보세요. 일반적으로 해당 함수들만 다시 구현하면 됩니다. - 모델의 중요한 구성 요소를 찾을 수 있어야 합니다. 모델 클래스는 어디에 있나요? 모델 하위 클래스(*EncoderModel*, *DecoderModel* 등)가 있나요? self-attention 레이어는 어디에 있나요? self-attention, cross-attention 등 여러 가지 다른 어텐션 레이어가 있나요? - 원본 환경에서 모델을 디버그할 수 있는 방법은 무엇인가요? *print* 문을 추가해야 하나요? *ipdb*와 같은 대화식 디버거를 사용할 수 있나요? PyCharm과 같은 효율적인 IDE를 사용해 모델을 디버그할 수 있나요? 원본 저장소에서 코드를 이식하는 작업을 시작하기 전에 원본 저장소에서 코드를 **효율적으로** 디버그할 수 있어야 합니다! 또한, 오픈 소스 라이브러리로 작업하고 있다는 것을 기억해야 합니다. 따라서 원본 저장소에서 issue를 열거나 pull request를 열기를 주저하지 마십시오. 이 저장소의 유지 관리자들은 누군가가 자신들의 코드를 살펴본다는 것에 대해 매우 기뻐할 것입니다! 현재 시점에서, 원래 모델을 디버깅하기 위해 어떤 디버깅 환경과 전략을 선호하는지는 당신에게 달렸습니다. 우리는 고가의 GPU 환경을 구축하는 것은 비추천합니다. 대신, 원래 저장소로 들어가서 작업을 시작할 때와 🤗 Transformers 모델의 구현을 시작할 때에도 CPU에서 작업하는 것이 좋습니다. 
모델이 이미 🤗 Transformers로 성공적으로 이식되었을 때에만 모델이 GPU에서도 예상대로 작동하는지 확인해야합니다. 일반적으로, 원래 모델을 실행하기 위한 두 가지 가능한 디버깅 환경이 있습니다. - [Jupyter 노트북](https://jupyter.org/) / [Google Colab](https://colab.research.google.com/notebooks/intro.ipynb) - 로컬 Python 스크립트 Jupyter 노트북의 장점은 셀 단위로 실행할 수 있다는 것입니다. 이는 논리적인 구성 요소를 더 잘 분리하고 중간 결과를 저장할 수 있으므로 디버깅 사이클이 더 빨라질 수 있습니다. 또한, 노트북은 다른 기여자와 쉽게 공유할 수 있으므로 Hugging Face 팀의 도움을 요청하려는 경우 매우 유용할 수 있습니다. Jupyter 노트북에 익숙하다면 이를 사용하는 것을 강력히 추천합니다. Jupyter 노트북의 단점은 사용에 익숙하지 않은 경우 새로운 프로그래밍 환경에 적응하는 데 시간을 할애해야 하며, `ipdb`와 같은 알려진 디버깅 도구를 더 이상 사용할 수 없을 수도 있다는 것입니다. 각 코드 베이스에 대해 좋은 첫 번째 단계는 항상 **작은** 사전 훈련된 체크포인트를 로드하고 더미 정수 벡터 입력을 사용하여 단일 forward pass를 재현하는 것입니다. 이와 같은 스크립트는 다음과 같을 수 있습니다(의사 코드로 작성): ```python model = BrandNewBertModel.load_pretrained_checkpoint("/path/to/checkpoint/") input_ids = [0, 4, 5, 2, 3, 7, 9] # vector of input ids original_output = model.predict(input_ids) ``` 다음으로, 디버깅 전략에 대해 일반적으로 다음과 같은 몇 가지 선택지가 있습니다: - 원본 모델을 많은 작은 테스트 가능한 구성 요소로 분해하고 각각에 대해 forward pass를 실행하여 검증합니다. - 원본 모델을 원본 *tokenizer*과 원본 *model*로만 분해하고 해당 부분에 대해 forward pass를 실행한 후 검증을 위해 중간 출력(print 문 또는 중단점)을 사용합니다. 다시 말하지만, 어떤 전략을 선택할지는 당신에게 달려 있습니다. 원본 코드 베이스에 따라 하나 또는 다른 전략이 유리할 수 있습니다. 원본 코드 베이스를 모델의 작은 하위 구성 요소로 분해할 수 있는지 여부, 예를 들어 원본 코드 베이스가 즉시 실행 모드에서 간단히 실행될 수 있는 경우, 그런 경우에는 그 노력이 가치가 있다는 것이 일반적입니다. 초기에 더 어려운 방법을 선택하는 것에는 몇 가지 중요한 장점이 있습니다. - 원본 모델을 🤗 Transformers 구현과 비교할 때 각 구성 요소가 일치하는지 자동으로 확인할 수 있습니다. 즉, 시각적인 비교(print 문을 통한 비교가 아닌) 대신 🤗 Transformers 구현과 그에 대응하는 원본 구성 요소가 일치하는지 확인할 수 있습니다. - 전체 모델을 모듈별로, 즉 작은 구성 요소로 분해함으로써 모델을 이식하는 큰 문제를 단순히 개별 구성 요소를 이식하는 작은 문제로 분해할 수 있으므로 작업을 더 잘 구조화할 수 있습니다. - 모델을 논리적으로 의미 있는 구성 요소로 분리하는 것은 모델의 설계에 대한 더 나은 개요를 얻고 모델을 더 잘 이해하는 데 도움이 됩니다. - 이러한 구성 요소별 테스트를 통해 코드를 변경하면서 회귀가 발생하지 않도록 보장할 수 있습니다. [Lysandre의 ELECTRA 통합 검사](https://gist.github.com/LysandreJik/db4c948f6b4483960de5cbac598ad4ed)는 이를 수행하는 좋은 예제입니다. 그러나 원본 코드 베이스가 매우 복잡하거나 중간 구성 요소를 컴파일된 모드에서 실행하는 것만 허용하는 경우, 모델을 테스트 가능한 작은 하위 구성 요소로 분해하는 것이 시간이 많이 소요되거나 불가능할 수도 있습니다. [T5의 MeshTensorFlow](https://github.com/tensorflow/mesh/tree/master/mesh_tensorflow) 라이브러리는 매우 복잡하며 모델을 하위 구성 요소로 분해하는 간단한 방법을 제공하지 않습니다. 이러한 라이브러리의 경우, 보통 print 문을 통해 확인합니다. 어떤 전략을 선택하더라도 권장되는 절차는 동일합니다. 먼저 시작 레이어를 디버그하고 마지막 레이어를 마지막에 디버그하는 것이 좋습니다. 다음 순서로 각 레이어의 출력을 검색하는 것이 좋습니다: 1. 모델에 전달된 입력 ID 가져오기 2. 워드 임베딩 가져오기 3. 첫 번째 Transformer 레이어의 입력 가져오기 4. 첫 번째 Transformer 레이어의 출력 가져오기 5. 다음 n-1개의 Transformer 레이어의 출력 가져오기 6. BrandNewBert 모델의 출력 가져오기 입력 ID는 정수 배열로 구성되며, 예를 들어 `input_ids = [0, 4, 4, 3, 2, 4, 1, 7, 19]`와 같을 수 있습니다. 다음 레이어의 출력은 종종 다차원 실수 배열로 구성되며, 다음과 같이 나타낼 수 있습니다: ``` [[ [-0.1465, -0.6501, 0.1993, ..., 0.1451, 0.3430, 0.6024], [-0.4417, -0.5920, 0.3450, ..., -0.3062, 0.6182, 0.7132], [-0.5009, -0.7122, 0.4548, ..., -0.3662, 0.6091, 0.7648], ..., [-0.5613, -0.6332, 0.4324, ..., -0.3792, 0.7372, 0.9288], [-0.5416, -0.6345, 0.4180, ..., -0.3564, 0.6992, 0.9191], [-0.5334, -0.6403, 0.4271, ..., -0.3339, 0.6533, 0.8694]]], ``` 🤗 Transformers에 추가되는 모든 모델은 통합 테스트를 통과해야 합니다. 즉, 원본 모델과 🤗 Transformers의 재구현 버전이 0.001의 정밀도로 정확히 동일한 출력을 내야 합니다! 동일한 모델이 다른 라이브러리에서 작성되었을 때 라이브러리 프레임워크에 따라 약간 다른 출력을 얻는 것은 정상이므로 1e-3(0.001)의 오차는 허용합니다. 거의 동일한 출력을 내는 것만으로는 충분하지 않으며, 완벽히 일치하는 수준이어야 합니다. 따라서 🤗 Transformers 버전의 중간 출력을 *brand_new_bert*의 원래 구현의 중간 출력과 여러 번 비교해야 합니다. 이 경우 원본 저장소의 **효율적인** 디버깅 환경이 절대적으로 중요합니다. 디버깅 환경을 가능한 한 효율적으로 만드는 몇 가지 조언을 제시합니다. - 중간 결과를 디버그하는 가장 좋은 방법을 찾으세요. 원본 저장소가 PyTorch로 작성되었다면 원본 모델을 더 작은 하위 구성 요소로 분해하여 중간 값을 검색하는 긴 스크립트를 작성하는 것에 시간을 투자할 가치가 있습니다. 
원본 저장소가 Tensorflow 1로 작성되었다면 [tf.print](https://www.tensorflow.org/api_docs/python/tf/print)와 같은 Tensorflow 출력 작업을 사용하여 중간 값을 출력해야 할 수도 있습니다. 원본 저장소가 Jax로 작성되었다면 forward pass를 실행할 때 모델이 **jit 되지 않도록** 해야 합니다. 예를 들어 [이 링크](https://github.com/google/jax/issues/196)를 확인해 보세요. - 사용 가능한 가장 작은 사전 훈련된 체크포인트를 사용하세요. 체크포인트가 작을수록 디버그 사이클이 더 빨라집니다. 전반적으로 forward pass에 10초 이상이 걸리는 경우 효율적이지 않습니다. 매우 큰 체크포인트만 사용할 수 있는 경우, 새 환경에서 임의로 초기화된 가중치로 더미 모델을 만들고 해당 가중치를 🤗 Transformers 버전과 비교하기 위해 저장하는 것이 더 의미가 있을 수 있습니다. - 디버깅 설정에서 가장 쉽게 forward pass를 호출하는 방법을 사용하세요. 원본 저장소에서 **단일** forward pass만 호출하는 함수를 찾는 것이 이상적입니다. 이 함수는 일반적으로 `predict`, `evaluate`, `forward`, `__call__`과 같이 호출됩니다. `autoregressive_sample`과 같은 텍스트 생성에서 `forward`를 여러 번 호출하여 텍스트를 생성하는 등의 작업을 수행하는 함수를 디버그하고 싶지 않을 것입니다. - 토큰화 과정을 모델의 *forward* pass와 분리하려고 노력하세요. 원본 저장소에서 입력 문자열을 입력해야 하는 예제가 있는 경우, 입력 문자열이 입력 ID로 변경되는 순간을 찾아서 시작하세요. 이 경우 직접 ID를 입력할 수 있도록 작은 스크립트를 작성하거나 원본 코드를 수정해야 할 수도 있습니다. - 디버깅 설정에서 모델이 훈련 모드가 아니라는 것을 확인하세요. 훈련 모드에서는 모델의 여러 드롭아웃 레이어 때문에 무작위 출력이 생성될 수 있습니다. 디버깅 환경에서 forward pass가 **결정론적**이도록 해야 합니다. 또는 동일한 프레임워크에 있는 경우 *transformers.utils.set_seed*를 사용하세요. 다음 섹션에서는 *brand_new_bert*에 대해 이 작업을 수행하는 데 더 구체적인 세부 사항/팁을 제공합니다. ### 5.-14. 🤗 Transformers에 BrandNewBert를 이식하기 [[5.-14.-port-brandnewbert-to-transformers]] 이제, 마침내 🤗 Transformers에 새로운 코드를 추가할 수 있습니다. 🤗 Transformers 포크의 클론으로 이동하세요: ```bash cd transformers ``` 다음과 같이 이미 존재하는 모델의 모델 아키텍처와 정확히 일치하는 모델을 추가하는 특별한 경우에는 [이 섹션](#write-a-conversion-script)에 설명된대로 변환 스크립트만 추가하면 됩니다. 이 경우에는 이미 존재하는 모델의 전체 모델 아키텍처를 그대로 재사용할 수 있습니다. 그렇지 않으면 새 모델 생성을 시작하겠습니다. 다음 스크립트를 사용하여 다음에서 시작하는 모델을 추가하는 것이 좋습니다. 기존 모델: ```bash transformers-cli add-new-model-like ``` 모델의 기본 정보를 입력하는 설문지가 표시됩니다. **huggingface/transformers 메인 저장소에 Pull Request 열기** 자동으로 생성된 코드를 수정하기 전에, 지금은 "작업 진행 중 (WIP)" 풀 리퀘스트를 열기 위한 시기입니다. 예를 들어, 🤗 Transformers에 "*brand_new_bert* 추가"라는 제목의 "[WIP] Add *brand_new_bert*" 풀 리퀘스트를 엽니다. 이렇게 하면 당신과 Hugging Face 팀이 🤗 Transformers에 모델을 통합하는 작업을 함께할 수 있습니다. 다음을 수행해야 합니다: 1. 메인 브랜치에서 작업을 잘 설명하는 이름으로 브랜치 생성 ```bash git checkout -b add_brand_new_bert ``` 2. 자동으로 생성된 코드 커밋 ```bash git add . git commit ``` 3. 현재 메인을 가져오고 리베이스 ```bash git fetch upstream git rebase upstream/main ``` 4. 변경 사항을 계정에 푸시 ```bash git push -u origin a-descriptive-name-for-my-changes ``` 5. 만족스럽다면, GitHub에서 자신의 포크한 웹 페이지로 이동합니다. "Pull request"를 클릭합니다. Hugging Face 팀의 일부 멤버의 GitHub 핸들을 리뷰어로 추가하여 Hugging Face 팀이 앞으로의 변경 사항에 대해 알림을 받을 수 있도록 합니다. 6. GitHub 풀 리퀘스트 웹 페이지 오른쪽에 있는 "Convert to draft"를 클릭하여 PR을 초안으로 변경합니다. 다음으로, 어떤 진전을 이루었다면 작업을 커밋하고 계정에 푸시하여 풀 리퀘스트에 표시되도록 해야 합니다. 또한, 다음과 같이 현재 메인과 작업을 업데이트해야 합니다: ```bash git fetch upstream git merge upstream/main ``` 일반적으로, 모델 또는 구현에 관한 모든 질문은 자신의 PR에서 해야 하며, PR에서 토론되고 해결되어야 합니다. 이렇게 하면 Hugging Face 팀이 새로운 코드를 커밋하거나 질문을 할 때 항상 알림을 받을 수 있습니다. Hugging Face 팀에게 문제 또는 질문을 효율적으로 이해할 수 있도록 추가한 코드를 명시하는 것이 도움이 될 때가 많습니다. 이를 위해, 변경 사항을 모두 볼 수 있는 "Files changed" 탭으로 이동하여 질문하고자 하는 줄로 이동한 다음 "+" 기호를 클릭하여 코멘트를 추가할 수 있습니다. 질문이나 문제가 해결되면, 생성된 코멘트의 "Resolve" 버튼을 클릭할 수 있습니다. 마찬가지로, Hugging Face 팀은 코드를 리뷰할 때 코멘트를 남길 것입니다. 우리는 PR에서 대부분의 질문을 GitHub에서 묻는 것을 권장합니다. 공개에 크게 도움이 되지 않는 매우 일반적인 질문의 경우, Slack이나 이메일을 통해 Hugging Face 팀에게 문의할 수 있습니다. **5. brand_new_bert에 대해 생성된 모델 코드를 적용하기** 먼저, 우리는 모델 자체에만 초점을 맞추고 토크나이저에 대해서는 신경 쓰지 않을 것입니다. 모든 관련 코드는 다음의 생성된 파일에서 찾을 수 있습니다: `src/transformers/models/brand_new_bert/modeling_brand_new_bert.py` 및 `src/transformers/models/brand_new_bert/configuration_brand_new_bert.py`. 이제 마침내 코딩을 시작할 수 있습니다 :). 
`src/transformers/models/brand_new_bert/modeling_brand_new_bert.py`의 생성된 코드는 인코더 전용 모델인 경우 BERT와 동일한 아키텍처를 가지거나, 인코더-디코더 모델인 경우 BART와 동일한 아키텍처를 가질 것입니다. 이 시점에서, 모델의 이론적 측면에 대해 배운 내용을 다시 상기해야 합니다: *모델이 BERT 또는 BART와 어떻게 다른가요?*. 자주 변경해야 하는 것은 *self-attention* 레이어, 정규화 레이어의 순서 등을 변경하는 것입니다. 다시 말하지만, 자신의 모델을 구현하는 데 도움이 되도록 Transformers에서 이미 존재하는 모델의 유사한 아키텍처를 살펴보는 것이 유용할 수 있습니다. **참고로** 이 시점에서, 코드가 완전히 정확하거나 깨끗하다고 확신할 필요는 없습니다. 오히려 처음에는 원본 코드의 첫 번째 *불완전하고* 복사된 버전을 `src/transformers/models/brand_new_bert/modeling_brand_new_bert.py`에 추가하는 것이 좋습니다. 필요한 모든 코드가 추가될 때까지 이러한 작업을 진행한 후, 다음 섹션에서 설명한 변환 스크립트를 사용하여 코드를 점진적으로 개선하고 수정하는 것이 훨씬 효율적입니다. 이 시점에서 작동해야 하는 유일한 것은 다음 명령이 작동하는 것입니다: ```python from transformers import BrandNewBertModel, BrandNewBertConfig model = BrandNewBertModel(BrandNewBertConfig()) ``` 위의 명령은 `BrandNewBertConfig()`에 정의된 기본 매개변수에 따라 무작위 가중치로 모델을 생성하며, 이로써 모든 구성 요소의 `init()` 메서드가 작동함을 보장합니다. 모든 무작위 초기화는 `BrandnewBertPreTrainedModel` 클래스의 `_init_weights` 메서드에서 수행되어야 합니다. 이 메서드는 구성 설정 변수에 따라 모든 리프 모듈을 초기화해야 합니다. BERT의 `_init_weights` 메서드 예제는 다음과 같습니다: ```py def _init_weights(self, module): """Initialize the weights""" if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) ``` 몇 가지 모듈에 대해 특별한 초기화가 필요한 경우 사용자 정의 방식을 사용할 수도 있습니다. 예를 들어, `Wav2Vec2ForPreTraining`에서 마지막 두 개의 선형 레이어는 일반적인 PyTorch `nn.Linear`의 초기화를 가져야 하지만, 다른 모든 레이어는 위와 같은 초기화를 사용해야 합니다. 이는 다음과 같이 코드화됩니다: ```py def _init_weights(self, module): """Initialize the weights""" if isinstance(module, Wav2Vec2ForPreTraining): module.project_hid.reset_parameters() module.project_q.reset_parameters() module.project_hid._is_hf_initialized = True module.project_q._is_hf_initialized = True elif isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() ``` `_is_hf_initialized` 플래그는 서브모듈을 한 번만 초기화하도록 내부적으로 사용됩니다. `module.project_q` 및 `module.project_hid`에 대해 `True`로 설정함으로써, 우리가 수행한 사용자 정의 초기화가 이후에 덮어쓰이지 않도록 합니다. 즉, `_init_weights` 함수가 이들에게 적용되지 않습니다. **6. 변환 스크립트 작성하기** 다음으로, 디버그에 사용한 체크포인트를 기존 저장소에서 만든 🤗 Transformers 구현과 호환되는 체크포인트로 변환할 수 있는 변환 스크립트를 작성해야 합니다. 변환 스크립트를 처음부터 작성하는 것보다는 *brand_new_bert*와 동일한 프레임워크로 작성된 유사한 모델을 변환한 기존 변환 스크립트를 찾아보는 것이 좋습니다. 일반적으로 기존 변환 스크립트를 복사하여 사용 사례에 맞게 약간 수정하는 것으로 충분합니다. 모델에 대해 유사한 기존 변환 스크립트를 어디에서 찾을 수 있는지 Hugging Face 팀에게 문의하는 것을 망설이지 마세요. - TensorFlow에서 PyTorch로 모델을 이전하는 경우, 좋은 참고 자료로 BERT의 변환 스크립트 [여기](https://github.com/huggingface/transformers/blob/7acfa95afb8194f8f9c1f4d2c6028224dbed35a2/src/transformers/models/bert/modeling_bert.py#L91)를 참조할 수 있습니다. - PyTorch에서 PyTorch로 모델을 이전하는 경우, 좋은 참고 자료로 BART의 변환 스크립트 [여기](https://github.com/huggingface/transformers/blob/main/src/transformers/models/bart/convert_bart_original_pytorch_checkpoint_to_pytorch.py)를 참조할 수 있습니다. 다음에서는 PyTorch 모델이 레이어 가중치를 저장하고 레이어 이름을 정의하는 방법에 대해 간단히 설명하겠습니다. PyTorch에서 레이어의 이름은 레이어에 지정한 클래스 속성의 이름으로 정의됩니다. 
다음과 같이 PyTorch에서 `SimpleModel`이라는 더미 모델을 정의해 봅시다: ```python from torch import nn class SimpleModel(nn.Module): def __init__(self): super().__init__() self.dense = nn.Linear(10, 10) self.intermediate = nn.Linear(10, 10) self.layer_norm = nn.LayerNorm(10) ``` 이제 이 모델 정의의 인스턴스를 생성할 수 있으며 `dense`, `intermediate`, `layer_norm` 등의 가중치가 랜덤하게 할당됩니다. 모델을 출력하여 아키텍처를 확인할 수 있습니다. ```python model = SimpleModel() print(model) ``` 이는 다음과 같이 출력됩니다: ``` SimpleModel( (dense): Linear(in_features=10, out_features=10, bias=True) (intermediate): Linear(in_features=10, out_features=10, bias=True) (layer_norm): LayerNorm((10,), eps=1e-05, elementwise_affine=True) ) ``` 우리는 레이어의 이름이 PyTorch에서 클래스 속성의 이름으로 정의되어 있는 것을 볼 수 있습니다. 특정 레이어의 가중치 값을 출력하여 확인할 수 있습니다: ```python print(model.dense.weight.data) ``` 가중치가 무작위로 초기화되었음을 확인할 수 있습니다. ``` tensor([[-0.0818, 0.2207, -0.0749, -0.0030, 0.0045, -0.1569, -0.1598, 0.0212, -0.2077, 0.2157], [ 0.1044, 0.0201, 0.0990, 0.2482, 0.3116, 0.2509, 0.2866, -0.2190, 0.2166, -0.0212], [-0.2000, 0.1107, -0.1999, -0.3119, 0.1559, 0.0993, 0.1776, -0.1950, -0.1023, -0.0447], [-0.0888, -0.1092, 0.2281, 0.0336, 0.1817, -0.0115, 0.2096, 0.1415, -0.1876, -0.2467], [ 0.2208, -0.2352, -0.1426, -0.2636, -0.2889, -0.2061, -0.2849, -0.0465, 0.2577, 0.0402], [ 0.1502, 0.2465, 0.2566, 0.0693, 0.2352, -0.0530, 0.1859, -0.0604, 0.2132, 0.1680], [ 0.1733, -0.2407, -0.1721, 0.1484, 0.0358, -0.0633, -0.0721, -0.0090, 0.2707, -0.2509], [-0.1173, 0.1561, 0.2945, 0.0595, -0.1996, 0.2988, -0.0802, 0.0407, 0.1829, -0.1568], [-0.1164, -0.2228, -0.0403, 0.0428, 0.1339, 0.0047, 0.1967, 0.2923, 0.0333, -0.0536], [-0.1492, -0.1616, 0.1057, 0.1950, -0.2807, -0.2710, -0.1586, 0.0739, 0.2220, 0.2358]]). ``` 변환 스크립트에서는 이러한 무작위로 초기화된 가중치를 체크포인트의 해당 레이어의 정확한 가중치로 채워야 합니다. 예를 들면 다음과 같습니다: ```python # retrieve matching layer weights, e.g. by # recursive algorithm layer_name = "dense" pretrained_weight = array_of_dense_layer model_pointer = getattr(model, "dense") model_pointer.weight.data = torch.from_numpy(pretrained_weight) ``` 이렇게 하면 PyTorch 모델의 무작위로 초기화된 각 가중치와 해당 체크포인트 가중치가 **모양과 이름** 모두에서 정확히 일치하는지 확인해야 합니다. 이를 위해 모양에 대한 assert 문을 추가하고 체크포인트 가중치의 이름을 출력해야 합니다. 예를 들어 다음과 같은 문장을 추가해야 합니다: ```python assert ( model_pointer.weight.shape == pretrained_weight.shape ), f"Pointer shape of random weight {model_pointer.shape} and array shape of checkpoint weight {pretrained_weight.shape} mismatched" ``` 또한 두 가중치의 이름을 출력하여 일치하는지 확인해야 합니다. *예시*: ```python logger.info(f"Initialize PyTorch weight {layer_name} from {pretrained_weight.name}") ``` 모양 또는 이름이 일치하지 않는 경우, 랜덤으로 초기화된 레이어에 잘못된 체크포인트 가중치를 할당한 것으로 추측됩니다. 잘못된 모양은 `BrandNewBertConfig()`의 구성 매개변수 설정이 변환하려는 체크포인트에 사용된 설정과 정확히 일치하지 않기 때문일 가능성이 가장 큽니다. 그러나 PyTorch의 레이어 구현 자체에서 가중치를 전치해야 할 수도 있습니다. 마지막으로, **모든** 필요한 가중치가 초기화되었는지 확인하고 초기화에 사용되지 않은 모든 체크포인트 가중치를 출력하여 모델이 올바르게 변환되었는지 확인해야 합니다. 잘못된 모양 문장이나 잘못된 이름 할당으로 인해 변환 시도가 실패하는 것은 완전히 정상입니다. 이는 `BrandNewBertConfig()`에서 잘못된 매개변수를 사용하거나 🤗 Transformers 구현에서 잘못된 아키텍처, 🤗 Transformers 구현의 구성 요소 중 하나의 `init()` 함수에 버그가 있는 경우이거나 체크포인트 가중치 중 하나를 전치해야 하는 경우일 가능성이 가장 높습니다. 이 단계는 이전 단계와 함께 반복되어야 하며 모든 체크포인트의 가중치가 Transformers 모델에 올바르게 로드되었을 때까지 계속되어야 합니다. 🤗 Transformers 구현에 체크포인트를 올바르게 로드한 후에는 `/path/to/converted/checkpoint/folder`와 같은 원하는 폴더에 모델을 저장할 수 있어야 합니다. 해당 폴더에는 `pytorch_model.bin` 파일과 `config.json` 파일이 모두 포함되어야 합니다. ```python model.save_pretrained("/path/to/converted/checkpoint/folder") ``` **7. 순방향 패스 구현하기** 🤗 Transformers 구현에 사전 훈련된 가중치를 정확하게 로드한 후에는 순방향 패스가 올바르게 구현되었는지 확인해야 합니다. 
[원본 저장소에 익숙해지기](#3-4-run-a-pretrained-checkpoint-using-the-original-repository)에서 이미 원본 저장소를 사용하여 모델의 순방향 패스를 실행하는 스크립트를 만들었습니다. 이제 원본 대신 🤗 Transformers 구현을 사용하는 유사한 스크립트를 작성해야 합니다. 다음과 같이 작성되어야 합니다: ```python model = BrandNewBertModel.from_pretrained("/path/to/converted/checkpoint/folder") input_ids = [0, 4, 4, 3, 2, 4, 1, 7, 19] output = model(input_ids).last_hidden_states ``` 🤗 Transformers 구현과 원본 모델 구현이 처음부터 정확히 동일한 출력을 제공하지 않거나 순방향 패스에서 오류가 발생할 가능성이 매우 높습니다. 실망하지 마세요. 예상된 일입니다! 먼저, 순방향 패스에서 오류가 발생하지 않도록 해야 합니다. 종종 잘못된 차원이 사용되어 *차원 불일치* 오류가 발생하거나 잘못된 데이터 유형 개체가 사용되는 경우가 있습니다. 예를 들면 `torch.long` 대신에 `torch.float32`가 사용된 경우입니다. 해결할 수 없는 오류가 발생하면 Hugging Face 팀에 도움을 요청하는 것이 좋습니다. 🤗 Transformers 구현이 올바르게 작동하는지 확인하는 마지막 단계는 출력이 `1e-3`의 정밀도로 동일한지 확인하는 것입니다. 먼저, 출력 모양이 동일하도록 보장해야 합니다. 즉, 🤗 Transformers 구현 스크립트와 원본 구현 사이에서 `outputs.shape`는 동일한 값을 반환해야 합니다. 그 다음으로, 출력 값이 동일하도록 해야 합니다. 이는 새로운 모델을 추가할 때 가장 어려운 부분 중 하나입니다. 출력이 동일하지 않은 일반적인 실수 사례는 다음과 같습니다: - 일부 레이어가 추가되지 않았습니다. 즉, *활성화* 레이어가 추가되지 않았거나 잔차 연결이 빠졌습니다. - 단어 임베딩 행렬이 연결되지 않았습니다. - 잘못된 위치 임베딩이 사용되었습니다. 원본 구현에서는 오프셋을 사용합니다. - 순방향 패스 중에 Dropout이 적용되었습니다. 이를 수정하려면 *model.training이 False*인지 확인하고 순방향 패스 중에 Dropout 레이어가 잘못 활성화되지 않도록 하세요. 즉, [PyTorch의 기능적 Dropout](https://pytorch.org/docs/stable/nn.functional.html?highlight=dropout#torch.nn.functional.dropout)에 *self.training*을 전달하세요. 문제를 해결하는 가장 좋은 방법은 일반적으로 원본 구현과 🤗 Transformers 구현의 순방향 패스를 나란히 놓고 차이점이 있는지 확인하는 것입니다. 이상적으로는 순방향 패스의 중간 출력을 디버그/출력하여 원본 구현과 🤗 Transformers 구현의 정확한 위치를 찾을 수 있어야 합니다. 먼저, 두 스크립트의 하드코딩된 `input_ids`가 동일한지 확인하세요. 다음으로, `input_ids`의 첫 번째 변환의 출력(일반적으로 단어 임베딩)이 동일한지 확인하세요. 그런 다음 네트워크의 가장 마지막 레이어까지 진행해보세요. 어느 시점에서 두 구현 사이에 차이가 있는 것을 알게 되는데, 이는 🤗 Transformers 구현의 버그 위치를 가리킬 것입니다. 저희 경험상으로는 원본 구현과 🤗 Transformers 구현 모두에서 동일한 위치에 많은 출력 문을 추가하고 이들의 중간 표현에 대해 동일한 값을 보이는 출력 문을 연속적으로 제거하는 것이 간단하고 효과적인 방법입니다. `torch.allclose(original_output, output, atol=1e-3)`로 출력을 확인하여 두 구현이 동일한 출력을 하는 것을 확신한다면, 가장 어려운 부분은 끝났습니다! 축하드립니다. 남은 작업은 쉬운 일이 될 것입니다 😊. **8. 필요한 모든 모델 테스트 추가하기** 이 시점에서 새로운 모델을 성공적으로 추가했습니다. 그러나 해당 모델이 요구되는 디자인에 완전히 부합하지 않을 수도 있습니다. 🤗 Transformers와 완벽하게 호환되는 구현인지 확인하기 위해 모든 일반 테스트를 통과해야 합니다. Cookiecutter는 아마도 모델을 위한 테스트 파일을 자동으로 추가했을 것입니다. 아마도 `tests/models/brand_new_bert/test_modeling_brand_new_bert.py`와 같은 경로에 위치할 것입니다. 이 테스트 파일을 실행하여 일반 테스트가 모두 통과하는지 확인하세요. ```bash pytest tests/models/brand_new_bert/test_modeling_brand_new_bert.py ``` 모든 일반 테스트를 수정한 후, 이제 수행한 작업을 충분히 테스트하여 다음 사항을 보장해야 합니다. - a) 커뮤니티가 *brand_new_bert*의 특정 테스트를 살펴봄으로써 작업을 쉽게 이해할 수 있도록 함 - b) 모델에 대한 향후 변경 사항이 모델의 중요한 기능을 손상시키지 않도록 함 먼저 통합 테스트를 추가해야 합니다. 이러한 통합 테스트는 이전에 모델을 🤗 Transformers로 구현하기 위해 사용한 디버깅 스크립트와 동일한 작업을 수행합니다. Cookiecutter에 이미 이러한 모델 테스트의 템플릿인 `BrandNewBertModelIntegrationTests`가 추가되어 있으며, 여러분이 작성해야 할 내용으로만 채워 넣으면 됩니다. 이러한 테스트가 통과하는지 확인하려면 다음을 실행하세요. ```bash RUN_SLOW=1 pytest -sv tests/models/brand_new_bert/test_modeling_brand_new_bert.py::BrandNewBertModelIntegrationTests ``` <Tip> Windows를 사용하는 경우 `RUN_SLOW=1`을 `SET RUN_SLOW=1`로 바꿔야 합니다. </Tip> 둘째로, *brand_new_bert*에 특화된 모든 기능도 별도의 테스트에서 추가로 테스트해야 합니다. 이 부분은 종종 잊히는데, 두 가지 측면에서 굉장히 유용합니다. - *brand_new_bert*의 특수 기능이 어떻게 작동해야 하는지 보여줌으로써 커뮤니티에게 모델 추가 과정에서 습득한 지식을 전달하는 데 도움이 됩니다. - 향후 기여자는 이러한 특수 테스트를 실행하여 모델에 대한 변경 사항을 빠르게 테스트할 수 있습니다. **9. 토크나이저 구현하기** 다음으로, *brand_new_bert*의 토크나이저를 추가해야 합니다. 보통 토크나이저는 🤗 Transformers의 기존 토크나이저와 동일하거나 매우 유사합니다. 토크나이저가 올바르게 작동하는지 확인하기 위해 먼저 원본 리포지토리에서 문자열을 입력하고 `input_ids`를 반환하는 스크립트를 생성하는 것이 좋습니다. 
다음과 같은 유사한 스크립트일 수 있습니다 (의사 코드로 작성): ```python input_str = "This is a long example input string containing special characters .$?-, numbers 2872 234 12 and words." model = BrandNewBertModel.load_pretrained_checkpoint("/path/to/checkpoint/") input_ids = model.tokenize(input_str) ``` 원본 리포지토리를 자세히 살펴보고 올바른 토크나이저 함수를 찾거나, 복제본에서 변경 사항을 적용하여 `input_ids`만 출력하도록 해야 합니다. 원본 리포지토리를 사용하는 기능적인 토큰화 스크립트를 작성한 후, 🤗 Transformers의 유사한 스크립트를 생성해야 합니다. 다음과 같이 작성되어야 합니다: ```python from transformers import BrandNewBertTokenizer input_str = "This is a long example input string containing special characters .$?-, numbers 2872 234 12 and words." tokenizer = BrandNewBertTokenizer.from_pretrained("/path/to/tokenizer/folder/") input_ids = tokenizer(input_str).input_ids ``` 두 개의 `input_ids`가 동일한 값을 반환할 때, 마지막 단계로 토크나이저 테스트 파일도 추가해야 합니다. *brand_new_bert*의 모델링 테스트 파일과 유사하게, *brand_new_bert*의 토크나이제이션 테스트 파일에는 몇 가지 하드코딩된 통합 테스트가 포함되어야 합니다. **10. 종단 간 통합 테스트 실행** 토크나이저를 추가한 후에는 모델과 토크나이저를 사용하여 몇 가지 종단 간 통합 테스트를 추가해야 합니다. `tests/models/brand_new_bert/test_modeling_brand_new_bert.py`에 추가해주세요. 이러한 테스트는 🤗 Transformers 구현이 예상대로 작동하는지를 의미 있는 text-to-text 예시로 보여줘야 합니다. 그 예시로는 *예를 들어* source-to-target 번역 쌍, article-to-summary 쌍, question-to-answer 쌍 등이 포함될 수 있습니다. 불러온 체크포인트 중 어느 것도 다운스트림 작업에서 미세 조정되지 않았다면, 모델 테스트만으로 충분합니다. 모델이 완전히 기능을 갖추었는지 확인하기 위해 마지막 단계로 GPU에서 모든 테스트를 실행하는 것이 좋습니다. 모델의 내부 텐서의 일부에 `.to(self.device)` 문을 추가하는 것을 잊었을 수 있으며, 이 경우 테스트에서 오류로 표시됩니다. GPU에 액세스할 수 없는 경우, Hugging Face 팀이 테스트를 대신 실행할 수 있습니다. **11. 기술문서 추가** 이제 *brand_new_bert*에 필요한 모든 기능이 추가되었습니다. 거의 끝났습니다! 추가해야 할 것은 멋진 기술문서과 기술문서 페이지입니다. Cookiecutter가 `docs/source/model_doc/brand_new_bert.md`라는 템플릿 파일을 추가해줬을 것입니다. 이 페이지를 사용하기 전에 모델을 사용하는 사용자들은 일반적으로 이 페이지를 먼저 확인합니다. 따라서 문서는 이해하기 쉽고 간결해야 합니다. 모델을 사용하는 방법을 보여주기 위해 *팁*을 추가하는 것이 커뮤니티에 매우 유용합니다. 독스트링에 관련하여 Hugging Face 팀에 문의하는 것을 주저하지 마세요. 다음으로, `src/transformers/models/brand_new_bert/modeling_brand_new_bert.py`에 추가된 독스트링이 올바르며 필요한 모든 입력 및 출력을 포함하도록 확인하세요. [여기](writing-documentation)에서 우리의 문서 작성 가이드와 독스트링 형식에 대한 상세 가이드가 있습니다. 문서는 일반적으로 커뮤니티와 모델의 첫 번째 접점이기 때문에, 문서는 적어도 코드만큼의 주의를 기울여야 합니다. **코드 리팩토링** 좋아요, 이제 *brand_new_bert*를 위한 모든 필요한 코드를 추가했습니다. 이 시점에서 다음을 실행하여 잠재적으로 잘못된 코드 스타일을 수정해야 합니다: 그리고 코딩 스타일이 품질 점검을 통과하는지 확인하기 위해 다음을 실행하고 확인해야 합니다: ```bash make style ``` 🤗 Transformers에는 여전히 실패할 수 있는 몇 가지 매우 엄격한 디자인 테스트가 있습니다. 이는 독스트링에 누락된 정보나 잘못된 명명 때문에 종종 발생합니다. 여기서 막히면 Hugging Face 팀이 도움을 줄 것입니다. ```bash make quality ``` 마지막으로, 코드가 정확히 작동하는 것을 확인한 후에는 항상 코드를 리팩토링하는 것이 좋은 생각입니다. 모든 테스트가 통과된 지금은 추가한 코드를 다시 검토하고 리팩토링하는 좋은 시기입니다. 이제 코딩 부분을 완료했습니다. 축하합니다! 🎉 멋져요! 😎 **12. 모델을 모델 허브에 업로드하세요** 이 마지막 파트에서는 모든 체크포인트를 변환하여 모델 허브에 업로드하고 각 업로드된 모델 체크포인트에 대한 모델 카드를 추가해야 합니다. [Model sharing and uploading Page](model_sharing)를 읽고 허브 기능에 익숙해지세요. *brand_new_bert*의 저자 조직 아래에 모델을 업로드할 수 있는 필요한 액세스 권한을 얻기 위해 Hugging Face 팀과 협업해야 합니다. `transformers`의 모든 모델에 있는 `push_to_hub` 메서드는 체크포인트를 허브에 빠르고 효율적으로 업로드하는 방법입니다. 아래에 작은 코드 조각이 붙여져 있습니다: 각 체크포인트에 적합한 모델 카드를 만드는 데 시간을 할애하는 것은 가치가 있습니다. 모델 카드는 체크포인트의 특성을 강조해야 합니다. *예를 들어* 이 체크포인트는 어떤 데이터셋에서 사전 훈련/세부 훈련되었는지? 이 모델은 어떤 하위 작업에서 사용해야 하는지? 그리고 모델을 올바르게 사용하는 방법에 대한 몇 가지 코드도 포함해야 합니다. ```python brand_new_bert.push_to_hub("brand_new_bert") # Uncomment the following line to push to an organization. # brand_new_bert.push_to_hub("<organization>/brand_new_bert") ``` **13. (선택 사항) 노트북 추가** *brand_new_bert*를 다운스트림 작업에서 추론 또는 미세 조정에 사용하는 방법을 자세히 보여주는 노트북을 추가하는 것이 매우 유용합니다. 이것은 PR을 병합하는 데 필수적이지는 않지만 커뮤니티에 매우 유용합니다. **14. 완료된 PR 제출** 이제 프로그래밍을 마쳤으며, 마지막 단계로 PR을 메인 브랜치에 병합해야 합니다. 
보통 Hugging Face 팀은 이미 여기까지 도움을 주었을 것입니다. 다만 완료된 PR에 멋진 설명을 추가하고, 리뷰어에게 특정 디자인 선택 사항을 짚어 주는 데 약간의 시간을 들일 가치가 있습니다.

### 작업물을 공유하세요!! [[share-your-work]]

이제 커뮤니티에서 작업물을 인정받을 시간입니다! 모델 추가 작업을 완료하는 것은 Transformers와 전체 NLP 커뮤니티에 큰 기여입니다. 당신의 코드와 이식된 사전 훈련된 모델은 수백, 심지어 수천 명의 개발자와 연구원이 사용하게 될 것입니다. 당신의 작업을 자랑스러워해야 하며, 이를 커뮤니티와 공유해야 합니다.

**당신은 커뮤니티 내 모든 사람들이 매우 쉽게 접근할 수 있는 또 다른 모델을 만들었습니다! 🤯**
transformers/docs/source/ko/add_new_model.md/0
{ "file_path": "transformers/docs/source/ko/add_new_model.md", "repo_id": "transformers", "token_count": 43040 }
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Text generation strategies[[text-generation-strategies]] 텍스트 생성은 개방형 텍스트 작성, 요약, 번역 등 다양한 자연어 처리(NLP) 작업에 필수적입니다. 이는 또한 음성-텍스트 변환, 시각-텍스트 변환과 같이 텍스트를 출력으로 하는 여러 혼합 모달리티 응용 프로그램에서도 중요한 역할을 합니다. 텍스트 생성을 가능하게 하는 몇몇 모델로는 GPT2, XLNet, OpenAI GPT, CTRL, TransformerXL, XLM, Bart, T5, GIT, Whisper 등이 있습니다. [`~generation.GenerationMixin.generate`] 메서드를 활용하여 다음과 같은 다양한 작업들에 대해 텍스트 결과물을 생성하는 몇 가지 예시를 살펴보세요: * [텍스트 요약](./tasks/summarization#inference) * [이미지 캡셔닝](./model_doc/git#transformers.GitForCausalLM.forward.example) * [오디오 전사](./model_doc/whisper#transformers.WhisperForConditionalGeneration.forward.example) generate 메소드에 입력되는 값들은 모델의 데이터 형태에 따라 달라집니다. 이 값들은 AutoTokenizer나 AutoProcessor와 같은 모델의 전처리 클래스에 의해 반환됩니다. 모델의 전처리 장치가 하나 이상의 입력 유형을 생성하는 경우, 모든 입력을 generate()에 전달해야 합니다. 각 모델의 전처리 장치에 대해서는 해당 모델의 문서에서 자세히 알아볼 수 있습니다. 텍스트를 생성하기 위해 출력 토큰을 선택하는 과정을 디코딩이라고 하며, `generate()` 메소드가 사용할 디코딩 전략을 사용자가 커스터마이징할 수 있습니다. 디코딩 전략을 수정하는 것은 훈련 가능한 매개변수의 값들을 변경하지 않지만, 생성된 출력의 품질에 눈에 띄는 영향을 줄 수 있습니다. 이는 텍스트에서 반복을 줄이고, 더 일관성 있게 만드는 데 도움을 줄 수 있습니다. 이 가이드에서는 다음과 같은 내용을 다룹니다: * 기본 생성 설정 * 일반적인 디코딩 전략과 주요 파라미터 * 🤗 Hub에서 미세 조정된 모델과 함께 사용자 정의 생성 설정을 저장하고 공유하는 방법 ## 기본 텍스트 생성 설정[[default-text-generation-configuration]] 모델의 디코딩 전략은 생성 설정에서 정의됩니다. 사전 훈련된 모델을 [`pipeline`] 내에서 추론에 사용할 때, 모델은 내부적으로 기본 생성 설정을 적용하는 `PreTrainedModel.generate()` 메소드를 호출합니다. 사용자가 모델과 함께 사용자 정의 설정을 저장하지 않았을 경우에도 기본 설정이 사용됩니다. 모델을 명시적으로 로드할 때, `model.generation_config`을 통해 제공되는 생성 설정을 검사할 수 있습니다. ```python >>> from transformers import AutoModelForCausalLM >>> model = AutoModelForCausalLM.from_pretrained("distilbert/distilgpt2") >>> model.generation_config GenerationConfig { "bos_token_id": 50256, "eos_token_id": 50256, } ``` `model.generation_config`를 출력하면 기본 설정과 다른 값들만 표시되고, 기본값들은 나열되지 않습니다. 기본 생성 설정은 입력 프롬프트와 출력을 합친 최대 크기를 20 토큰으로 제한하여 리소스 부족을 방지합니다. 기본 디코딩 전략은 탐욕 탐색(greedy search)으로, 다음 토큰으로 가장 높은 확률을 가진 토큰을 선택하는 가장 단순한 디코딩 전략입니다. 많은 작업과 작은 출력 크기에 대해서는 이 방법이 잘 작동하지만, 더 긴 출력을 생성할 때 사용하면 매우 반복적인 결과를 생성하게 될 수 있습니다. ## 텍스트 생성 사용자 정의[[customize-text-generation]] 파라미터와 해당 값을 [`generate`] 메소드에 직접 전달하여 `generation_config`을 재정의할 수 있습니다: ```python >>> my_model.generate(**inputs, num_beams=4, do_sample=True) # doctest: +SKIP ``` 기본 디코딩 전략이 대부분의 작업에 잘 작동한다 하더라도, 조정할 수 있는 몇 가지 파라미터가 있습니다. 일반적으로 조정되는 파라미터에는 다음과 같은 것들이 포함됩니다: - `max_new_tokens`: 생성할 최대 토큰 수입니다. 즉, 프롬프트에 있는 토큰을 제외한 출력 시퀀스의 크기입니다. 출력의 길이를 중단 기준으로 사용하는 대신, 전체 생성물이 일정 시간을 초과할 때 생성을 중단하기로 선택할 수도 있습니다. 더 알아보려면 [`StoppingCriteria`]를 확인하세요. - `num_beams`: 1보다 큰 수의 빔을 지정함으로써, 탐욕 탐색(greedy search)에서 빔 탐색(beam search)으로 전환하게 됩니다. 이 전략은 각 시간 단계에서 여러 가설을 평가하고 결국 전체 시퀀스에 대해 가장 높은 확률을 가진 가설을 선택합니다. 이는 초기 토큰의 확률이 낮아 탐욕 탐색에 의해 무시되었을 높은 확률의 시퀀스를 식별할 수 있는 장점을 가집니다. - `do_sample`: 이 매개변수를 `True`로 설정하면, 다항 샘플링, 빔 탐색 다항 샘플링, Top-K 샘플링 및 Top-p 샘플링과 같은 디코딩 전략을 활성화합니다. 
이러한 전략들은 전체 어휘에 대한 확률 분포에서 다음 토큰을 선택하며, 전략별로 특정 조정이 적용됩니다. - `num_return_sequences`: 각 입력에 대해 반환할 시퀀스 후보의 수입니다. 이 옵션은 빔 탐색(beam search)의 변형과 샘플링과 같이 여러 시퀀스 후보를 지원하는 디코딩 전략에만 사용할 수 있습니다. 탐욕 탐색(greedy search)과 대조 탐색(contrastive search) 같은 디코딩 전략은 단일 출력 시퀀스를 반환합니다. ## 모델에 사용자 정의 디코딩 전략 저장[[save-a-custom-decoding-strategy-with-your-model]] 특정 생성 설정을 가진 미세 조정된 모델을 공유하고자 할 때, 다음 단계를 따를 수 있습니다: * [`GenerationConfig`] 클래스 인스턴스를 생성합니다. * 디코딩 전략 파라미터를 설정합니다. * 생성 설정을 [`GenerationConfig.save_pretrained`]를 사용하여 저장하며, `config_file_name` 인자는 비워둡니다. * 모델의 저장소에 설정을 업로드하기 위해 `push_to_hub`를 `True`로 설정합니다. ```python >>> from transformers import AutoModelForCausalLM, GenerationConfig >>> model = AutoModelForCausalLM.from_pretrained("my_account/my_model") # doctest: +SKIP >>> generation_config = GenerationConfig( ... max_new_tokens=50, do_sample=True, top_k=50, eos_token_id=model.config.eos_token_id ... ) >>> generation_config.save_pretrained("my_account/my_model", push_to_hub=True) # doctest: +SKIP ``` 단일 디렉토리에 여러 생성 설정을 저장할 수 있으며, 이때 [`GenerationConfig.save_pretrained`]의 `config_file_name` 인자를 사용합니다. 나중에 [`GenerationConfig.from_pretrained`]로 이들을 인스턴스화할 수 있습니다. 이는 단일 모델에 대해 여러 생성 설정을 저장하고 싶을 때 유용합니다(예: 샘플링을 이용한 창의적 텍스트 생성을 위한 하나, 빔 탐색을 이용한 요약을 위한 다른 하나 등). 모델에 설정 파일을 추가하기 위해 적절한 Hub 권한을 가지고 있어야 합니다. ```python >>> from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, GenerationConfig >>> tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-small") >>> model = AutoModelForSeq2SeqLM.from_pretrained("google-t5/t5-small") >>> translation_generation_config = GenerationConfig( ... num_beams=4, ... early_stopping=True, ... decoder_start_token_id=0, ... eos_token_id=model.config.eos_token_id, ... pad_token=model.config.pad_token_id, ... ) >>> # 팁: Hub에 push하려면 `push_to_hub=True`를 추가 >>> translation_generation_config.save_pretrained("/tmp", "translation_generation_config.json") >>> # 명명된 생성 설정 파일을 사용하여 생성을 매개변수화할 수 있습니다. >>> generation_config = GenerationConfig.from_pretrained("/tmp", "translation_generation_config.json") >>> inputs = tokenizer("translate English to French: Configuration files are easy to use!", return_tensors="pt") >>> outputs = model.generate(**inputs, generation_config=generation_config) >>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True)) ['Les fichiers de configuration sont faciles à utiliser!'] ``` ## 스트리밍[[streaming]] `generate()` 메소드는 `streamer` 입력을 통해 스트리밍을 지원합니다. `streamer` 입력은 `put()`과 `end()` 메소드를 가진 클래스의 인스턴스와 호환됩니다. 내부적으로, `put()`은 새 토큰을 추가하는 데 사용되며, `end()`는 텍스트 생성의 끝을 표시하는 데 사용됩니다. <Tip warning={true}> 스트리머 클래스의 API는 아직 개발 중이며, 향후 변경될 수 있습니다. </Tip> 실제로 다양한 목적을 위해 자체 스트리밍 클래스를 만들 수 있습니다! 또한, 기본적인 스트리밍 클래스들도 준비되어 있어 바로 사용할 수 있습니다. 예를 들어, [`TextStreamer`] 클래스를 사용하여 `generate()`의 출력을 화면에 한 단어씩 스트리밍할 수 있습니다: ```python >>> from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer >>> tok = AutoTokenizer.from_pretrained("openai-community/gpt2") >>> model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2") >>> inputs = tok(["An increasing sequence: one,"], return_tensors="pt") >>> streamer = TextStreamer(tok) >>> # 스트리머는 평소와 같은 출력값을 반환할 뿐만 아니라 생성된 텍스트도 표준 출력(stdout)으로 출력합니다. >>> _ = model.generate(**inputs, streamer=streamer, max_new_tokens=20) An increasing sequence: one, two, three, four, five, six, seven, eight, nine, ten, eleven, ``` ## 디코딩 전략[[decoding-strategies]] `generate()` 매개변수와 궁극적으로 `generation_config`의 특정 조합을 사용하여 특정 디코딩 전략을 활성화할 수 있습니다. 
이 개념이 처음이라면, 흔히 사용되는 디코딩 전략이 어떻게 작동하는지 설명하는 [이 블로그 포스트](https://huggingface.co/blog/how-to-generate)를 읽어보는 것을 추천합니다. 여기서는 디코딩 전략을 제어하는 몇 가지 매개변수를 보여주고, 이를 어떻게 사용할 수 있는지 설명하겠습니다. ### 탐욕 탐색(Greedy Search)[[greedy-search]] [`generate`]는 기본적으로 탐욕 탐색 디코딩을 사용하므로 이를 활성화하기 위해 별도의 매개변수를 지정할 필요가 없습니다. 이는 `num_beams`가 1로 설정되고 `do_sample=False`로 되어 있다는 의미입니다." ```python >>> from transformers import AutoModelForCausalLM, AutoTokenizer >>> prompt = "I look forward to" >>> checkpoint = "distilbert/distilgpt2" >>> tokenizer = AutoTokenizer.from_pretrained(checkpoint) >>> inputs = tokenizer(prompt, return_tensors="pt") >>> model = AutoModelForCausalLM.from_pretrained(checkpoint) >>> outputs = model.generate(**inputs) >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ['I look forward to seeing you all again!\n\n\n\n\n\n\n\n\n\n\n'] ``` ### 대조 탐색(Contrastive search)[[contrastive-search]] 2022년 논문 [A Contrastive Framework for Neural Text Generation](https://arxiv.org/abs/2202.06417)에서 제안된 대조 탐색 디코딩 전략은 반복되지 않으면서도 일관된 긴 출력을 생성하는 데 있어 우수한 결과를 보였습니다. 대조 탐색이 작동하는 방식을 알아보려면 [이 블로그 포스트](https://huggingface.co/blog/introducing-csearch)를 확인하세요. 대조 탐색의 동작을 가능하게 하고 제어하는 두 가지 주요 매개변수는 `penalty_alpha`와 `top_k`입니다: ```python >>> from transformers import AutoTokenizer, AutoModelForCausalLM >>> checkpoint = "openai-community/gpt2-large" >>> tokenizer = AutoTokenizer.from_pretrained(checkpoint) >>> model = AutoModelForCausalLM.from_pretrained(checkpoint) >>> prompt = "Hugging Face Company is" >>> inputs = tokenizer(prompt, return_tensors="pt") >>> outputs = model.generate(**inputs, penalty_alpha=0.6, top_k=4, max_new_tokens=100) >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ['Hugging Face Company is a family owned and operated business. We pride ourselves on being the best in the business and our customer service is second to none.\n\nIf you have any questions about our products or services, feel free to contact us at any time. We look forward to hearing from you!'] ``` ### 다항 샘플링(Multinomial sampling)[[multinomial-sampling]] 탐욕 탐색(greedy search)이 항상 가장 높은 확률을 가진 토큰을 다음 토큰으로 선택하는 것과 달리, 다항 샘플링(multinomial sampling, 조상 샘플링(ancestral sampling)이라고도 함)은 모델이 제공하는 전체 어휘에 대한 확률 분포를 기반으로 다음 토큰을 무작위로 선택합니다. 0이 아닌 확률을 가진 모든 토큰은 선택될 기회가 있으므로, 반복의 위험을 줄일 수 있습니다. 다항 샘플링을 활성화하려면 `do_sample=True` 및 `num_beams=1`을 설정하세요. ```python >>> from transformers import AutoTokenizer, AutoModelForCausalLM, set_seed >>> set_seed(0) # 재현성을 위해 >>> checkpoint = "openai-community/gpt2-large" >>> tokenizer = AutoTokenizer.from_pretrained(checkpoint) >>> model = AutoModelForCausalLM.from_pretrained(checkpoint) >>> prompt = "Today was an amazing day because" >>> inputs = tokenizer(prompt, return_tensors="pt") >>> outputs = model.generate(**inputs, do_sample=True, num_beams=1, max_new_tokens=100) >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ['Today was an amazing day because when you go to the World Cup and you don\'t, or when you don\'t get invited, that\'s a terrible feeling."'] ``` ### 빔 탐색(Beam-search) 디코딩[[beam-search-decoding]] 탐욕 검색(greedy search)과 달리, 빔 탐색(beam search) 디코딩은 각 시간 단계에서 여러 가설을 유지하고 결국 전체 시퀀스에 대해 가장 높은 확률을 가진 가설을 선택합니다. 이는 낮은 확률의 초기 토큰으로 시작하고 그리디 검색에서 무시되었을 가능성이 높은 시퀀스를 식별하는 이점이 있습니다. 이 디코딩 전략을 활성화하려면 `num_beams` (추적할 가설 수라고도 함)를 1보다 크게 지정하세요. 
```python >>> from transformers import AutoModelForCausalLM, AutoTokenizer >>> prompt = "It is astonishing how one can" >>> checkpoint = "openai-community/gpt2-medium" >>> tokenizer = AutoTokenizer.from_pretrained(checkpoint) >>> inputs = tokenizer(prompt, return_tensors="pt") >>> model = AutoModelForCausalLM.from_pretrained(checkpoint) >>> outputs = model.generate(**inputs, num_beams=5, max_new_tokens=50) >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ['It is astonishing how one can have such a profound impact on the lives of so many people in such a short period of time."\n\nHe added: "I am very proud of the work I have been able to do in the last few years.\n\n"I have'] ``` ### 빔 탐색 다항 샘플링(Beam-search multinomial sampling)[[beam-search-multinomial-sampling]] 이 디코딩 전략은 이름에서 알 수 있듯이 빔 탐색과 다항 샘플링을 결합한 것입니다. 이 디코딩 전략을 사용하기 위해서는 `num_beams`를 1보다 큰 값으로 설정하고, `do_sample=True`로 설정해야 합니다. ```python >>> from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, set_seed >>> set_seed(0) # 재현성을 위해 >>> prompt = "translate English to German: The house is wonderful." >>> checkpoint = "google-t5/t5-small" >>> tokenizer = AutoTokenizer.from_pretrained(checkpoint) >>> inputs = tokenizer(prompt, return_tensors="pt") >>> model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint) >>> outputs = model.generate(**inputs, num_beams=5, do_sample=True) >>> tokenizer.decode(outputs[0], skip_special_tokens=True) 'Das Haus ist wunderbar.' ``` ### 다양한 빔 탐색 디코딩(Diverse beam search decoding)[[diverse-beam-search-decoding]] 다양한 빔 탐색(Decoding) 전략은 선택할 수 있는 더 다양한 빔 시퀀스 집합을 생성할 수 있게 해주는 빔 탐색 전략의 확장입니다. 이 방법은 어떻게 작동하는지 알아보려면, [다양한 빔 탐색: 신경 시퀀스 모델에서 다양한 솔루션 디코딩하기](https://arxiv.org/pdf/1610.02424.pdf)를 참조하세요. 이 접근 방식은 세 가지 주요 매개변수를 가지고 있습니다: `num_beams`, `num_beam_groups`, 그리고 `diversity_penalty`. 다양성 패널티는 그룹 간에 출력이 서로 다르게 하기 위한 것이며, 각 그룹 내에서 빔 탐색이 사용됩니다. ```python >>> from transformers import AutoTokenizer, AutoModelForSeq2SeqLM >>> checkpoint = "google/pegasus-xsum" >>> prompt = ( ... "The Permaculture Design Principles are a set of universal design principles " ... "that can be applied to any location, climate and culture, and they allow us to design " ... "the most efficient and sustainable human habitation and food production systems. " ... "Permaculture is a design system that encompasses a wide variety of disciplines, such " ... "as ecology, landscape design, environmental science and energy conservation, and the " ... "Permaculture design principles are drawn from these various disciplines. Each individual " ... "design principle itself embodies a complete conceptual framework based on sound " ... "scientific principles. When we bring all these separate principles together, we can " ... "create a design system that both looks at whole systems, the parts that these systems " ... "consist of, and how those parts interact with each other to create a complex, dynamic, " ... "living system. Each design principle serves as a tool that allows us to integrate all " ... "the separate parts of a design, referred to as elements, into a functional, synergistic, " ... "whole system, where the elements harmoniously interact and work together in the most " ... "efficient way possible." ... 
)

>>> tokenizer = AutoTokenizer.from_pretrained(checkpoint)
>>> inputs = tokenizer(prompt, return_tensors="pt")

>>> model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint)
>>> outputs = model.generate(**inputs, num_beams=5, num_beam_groups=5, max_new_tokens=30, diversity_penalty=1.0)
>>> tokenizer.decode(outputs[0], skip_special_tokens=True)
'The Design Principles are a set of universal design principles that can be applied to any location, climate and culture, and they allow us to design the'
```

이 가이드에서는 다양한 디코딩 전략을 가능하게 하는 주요 매개변수를 보여줬습니다. 이 외에도 [`generate`] 메서드에는 고급 매개변수가 있어 동작을 더욱 세부적으로 제어할 수 있습니다. 사용 가능한 매개변수의 전체 목록은 [API 문서](./main_classes/text_generation.md)를 참조하세요.

### 추론 디코딩(Speculative Decoding)[[speculative-decoding]]

추론 디코딩(보조 디코딩(assisted decoding)으로도 알려짐)은 위에서 소개한 디코딩 전략을 변형한 것으로, 동일한 토크나이저를 사용하는 훨씬 작은 보조 모델을 활용하여 몇 가지 후보 토큰을 생성합니다. 주 모델은 단일 순방향 패스로 후보 토큰을 검증하여 디코딩 과정을 가속화합니다. `do_sample=True`일 경우, [추론 디코딩 논문](https://arxiv.org/pdf/2211.17192.pdf)에 소개된 토큰 검증과 재샘플링 방식이 사용됩니다.

현재 보조 디코딩은 탐욕 검색(greedy search)과 샘플링만을 지원하며, 배치 입력은 지원하지 않습니다. 보조 디코딩에 대해 더 알고 싶다면, [이 블로그 포스트](https://huggingface.co/blog/assisted-generation)를 확인해 주세요.

보조 디코딩을 활성화하려면 모델과 함께 `assistant_model` 인수를 설정하세요.

```python
>>> from transformers import AutoModelForCausalLM, AutoTokenizer

>>> prompt = "Alice and Bob"
>>> checkpoint = "EleutherAI/pythia-1.4b-deduped"
>>> assistant_checkpoint = "EleutherAI/pythia-160m-deduped"

>>> tokenizer = AutoTokenizer.from_pretrained(checkpoint)
>>> inputs = tokenizer(prompt, return_tensors="pt")

>>> model = AutoModelForCausalLM.from_pretrained(checkpoint)
>>> assistant_model = AutoModelForCausalLM.from_pretrained(assistant_checkpoint)
>>> outputs = model.generate(**inputs, assistant_model=assistant_model)
>>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
['Alice and Bob are sitting in a bar. Alice is drinking a beer and Bob is drinking a']
```

샘플링 방법과 함께 보조 디코딩을 사용하는 경우 다항 샘플링과 마찬가지로 `temperature` 인수를 사용하여 무작위성을 제어할 수 있습니다. 그러나 보조 디코딩에서는 `temperature`를 낮추면 대기 시간을 개선하는 데 도움이 될 수 있습니다.

```python
>>> from transformers import AutoModelForCausalLM, AutoTokenizer, set_seed
>>> set_seed(42)  # 재현성을 위해

>>> prompt = "Alice and Bob"
>>> checkpoint = "EleutherAI/pythia-1.4b-deduped"
>>> assistant_checkpoint = "EleutherAI/pythia-160m-deduped"

>>> tokenizer = AutoTokenizer.from_pretrained(checkpoint)
>>> inputs = tokenizer(prompt, return_tensors="pt")

>>> model = AutoModelForCausalLM.from_pretrained(checkpoint)
>>> assistant_model = AutoModelForCausalLM.from_pretrained(assistant_checkpoint)
>>> outputs = model.generate(**inputs, assistant_model=assistant_model, do_sample=True, temperature=0.5)
>>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
['Alice and Bob, who were both in their early twenties, were both in the process of']
```
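
참고로, 앞서 설명한 `num_return_sequences` 매개변수는 샘플링이나 빔 탐색처럼 여러 시퀀스 후보를 지원하는 전략과 함께 사용할 수 있습니다. 아래는 이 문서의 다른 예시에서 사용한 `openai-community/gpt2` 체크포인트를 그대로 가정한 간단한 예시 스케치로, 하나의 입력에 대해 세 개의 후보 시퀀스를 반환합니다:

```python
>>> from transformers import AutoModelForCausalLM, AutoTokenizer, set_seed

>>> set_seed(0)  # 재현성을 위해
>>> tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
>>> model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")

>>> inputs = tokenizer("Hugging Face is", return_tensors="pt")
>>> # 샘플링을 활성화하고 입력당 3개의 후보 시퀀스를 반환합니다.
>>> outputs = model.generate(**inputs, do_sample=True, num_return_sequences=3, max_new_tokens=20)
>>> len(outputs)
3
```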
transformers/docs/source/ko/generation_strategies.md/0
{ "file_path": "transformers/docs/source/ko/generation_strategies.md", "repo_id": "transformers", "token_count": 13395 }
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 대규모 언어 모델로 생성하기 [[generation-with-llms]] [[open-in-colab]] LLM 또는 대규모 언어 모델은 텍스트 생성의 핵심 구성 요소입니다. 간단히 말하면, 주어진 입력 텍스트에 대한 다음 단어(정확하게는 토큰)를 예측하기 위해 훈련된 대규모 사전 훈련 변환기 모델로 구성됩니다. 토큰을 한 번에 하나씩 예측하기 때문에 새로운 문장을 생성하려면 모델을 호출하는 것 외에 더 복잡한 작업을 수행해야 합니다. 즉, 자기회귀 생성을 수행해야 합니다. 자기회귀 생성은 몇 개의 초기 입력값을 제공한 후, 그 출력을 다시 모델에 입력으로 사용하여 반복적으로 호출하는 추론 과정입니다. 🤗 Transformers에서는 [`~generation.GenerationMixin.generate`] 메소드가 이 역할을 하며, 이는 생성 기능을 가진 모든 모델에서 사용 가능합니다. 이 튜토리얼에서는 다음 내용을 다루게 됩니다: * LLM으로 텍스트 생성 * 일반적으로 발생하는 문제 해결 * LLM을 최대한 활용하기 위한 다음 단계 시작하기 전에 필요한 모든 라이브러리가 설치되어 있는지 확인하세요: ```bash pip install transformers bitsandbytes>=0.39.0 -q ``` ## 텍스트 생성 [[generate-text]] [인과적 언어 모델링(causal language modeling)](tasks/language_modeling)을 목적으로 학습된 언어 모델은 일련의 텍스트 토큰을 입력으로 사용하고, 그 결과로 다음 토큰이 나올 확률 분포를 제공합니다. <!-- [GIF 1 -- FWD PASS] --> <figure class="image table text-center m-0 w-full"> <video style="max-width: 90%; margin: auto;" autoplay loop muted playsinline src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/assisted-generation/gif_1_1080p.mov" ></video> <figcaption>"LLM의 전방 패스"</figcaption> </figure> LLM과 자기회귀 생성을 함께 사용할 때 핵심적인 부분은 이 확률 분포로부터 다음 토큰을 어떻게 고를 것인지입니다. 다음 반복 과정에 사용될 토큰을 결정하는 한, 어떠한 방법도 가능합니다. 확률 분포에서 가장 가능성이 높은 토큰을 선택하는 것처럼 간단할 수도 있고, 결과 분포에서 샘플링하기 전에 수십 가지 변환을 적용하는 것처럼 복잡할 수도 있습니다. <!-- [GIF 2 -- TEXT GENERATION] --> <figure class="image table text-center m-0 w-full"> <video style="max-width: 90%; margin: auto;" autoplay loop muted playsinline src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/assisted-generation/gif_2_1080p.mov" ></video> <figcaption>"자기회귀 생성은 확률 분포에서 다음 토큰을 반복적으로 선택하여 텍스트를 생성합니다."</figcaption> </figure> 위에서 설명한 과정은 어떤 종료 조건이 충족될 때까지 반복적으로 수행됩니다. 모델이 시퀀스의 끝(EOS 토큰)을 출력할 때까지를 종료 조건으로 하는 것이 이상적입니다. 그렇지 않은 경우에는 미리 정의된 최대 길이에 도달했을 때 생성이 중단됩니다. 모델이 예상대로 동작하기 위해선 토큰 선택 단계와 정지 조건을 올바르게 설정하는 것이 중요합니다. 이러한 이유로, 각 모델에는 기본 생성 설정이 잘 정의된 [`~generation.GenerationConfig`] 파일이 함께 제공됩니다. 코드를 확인해봅시다! <Tip> 기본 LLM 사용에 관심이 있다면, 우리의 [`Pipeline`](pipeline_tutorial) 인터페이스로 시작하는 것을 추천합니다. 그러나 LLM은 양자화나 토큰 선택 단계에서의 미세한 제어와 같은 고급 기능들을 종종 필요로 합니다. 이러한 작업은 [`~generation.GenerationMixin.generate`]를 통해 가장 잘 수행될 수 있습니다. LLM을 이용한 자기회귀 생성은 자원을 많이 소모하므로, 적절한 처리량을 위해 GPU에서 실행되어야 합니다. </Tip> 먼저, 모델을 불러오세요. ```python >>> from transformers import AutoModelForCausalLM >>> model = AutoModelForCausalLM.from_pretrained( ... "mistralai/Mistral-7B-v0.1", device_map="auto", load_in_4bit=True ... ) ``` `from_pretrained` 함수를 호출할 때 2개의 플래그를 주목하세요: - `device_map`은 모델이 GPU로 이동되도록 합니다. - `load_in_4bit`는 리소스 요구 사항을 크게 줄이기 위해 [4비트 동적 양자화](main_classes/quantization)를 적용합니다. 이 외에도 모델을 초기화하는 다양한 방법이 있지만, LLM을 처음 시작할 때 이 설정을 추천합니다. 이어서 텍스트 입력을 [토크나이저](tokenizer_summary)으로 전처리하세요. 
```python >>> from transformers import AutoTokenizer >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1") >>> device = "cuda" if torch.cuda.is_available() else "cpu" >>> model_inputs = tokenizer(["A list of colors: red, blue"], return_tensors="pt").to(device) ``` `model_inputs` 변수에는 토큰화된 텍스트 입력과 함께 어텐션 마스크가 들어 있습니다. [`~generation.GenerationMixin.generate`]는 어텐션 마스크가 제공되지 않았을 경우에도 이를 추론하려고 노력하지만, 최상의 성능을 위해서는 가능하면 어텐션 마스크를 전달하는 것을 권장합니다. 마지막으로 [`~generation.GenerationMixin.generate`] 메소드를 호출해 생성된 토큰을 얻은 후, 이를 출력하기 전에 텍스트 형태로 변환하세요. ```python >>> generated_ids = model.generate(**model_inputs) >>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0] 'A list of colors: red, blue, green, yellow, black, white, and brown' ``` 이게 전부입니다! 몇 줄의 코드만으로 LLM의 능력을 활용할 수 있게 되었습니다. ## 일반적으로 발생하는 문제 [[common-pitfalls]] [생성 전략](generation_strategies)이 많고, 기본값이 항상 사용 사례에 적합하지 않을 수 있습니다. 출력이 예상과 다를 때 흔히 발생하는 문제와 이를 해결하는 방법에 대한 목록을 만들었습니다. ```py >>> from transformers import AutoModelForCausalLM, AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1") >>> tokenizer.pad_token = tokenizer.eos_token # Mistral has no pad token by default >>> model = AutoModelForCausalLM.from_pretrained( ... "mistralai/Mistral-7B-v0.1", device_map="auto", load_in_4bit=True ... ) ``` ### 생성된 출력이 너무 짧거나 길다 [[generated-output-is-too-shortlong]] [`~generation.GenerationConfig`] 파일에서 별도로 지정하지 않으면, `generate`는 기본적으로 최대 20개의 토큰을 반환합니다. `generate` 호출에서 `max_new_tokens`을 수동으로 설정하여 반환할 수 있는 새 토큰의 최대 수를 설정하는 것이 좋습니다. LLM(정확하게는 [디코더 전용 모델](https://huggingface.co/learn/nlp-course/chapter1/6?fw=pt))은 입력 프롬프트도 출력의 일부로 반환합니다. ```py >>> model_inputs = tokenizer(["A sequence of numbers: 1, 2"], return_tensors="pt").to("cuda") >>> # By default, the output will contain up to 20 tokens >>> generated_ids = model.generate(**model_inputs, pad_token_id=tokenizer.eos_token_id) >>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0] 'A sequence of numbers: 1, 2, 3, 4, 5' >>> # Setting `max_new_tokens` allows you to control the maximum length >>> generated_ids = model.generate(**model_inputs, pad_token_id=tokenizer.eos_token_id, max_new_tokens=50) >>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0] 'A sequence of numbers: 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,' ``` ### 잘못된 생성 모드 [[incorrect-generation-mode]] 기본적으로 [`~generation.GenerationConfig`] 파일에서 별도로 지정하지 않으면, `generate`는 각 반복에서 가장 확률이 높은 토큰을 선택합니다(그리디 디코딩). 하려는 작업에 따라 이 방법은 바람직하지 않을 수 있습니다. 예를 들어, 챗봇이나 에세이 작성과 같은 창의적인 작업은 샘플링이 적합할 수 있습니다. 반면, 오디오를 텍스트로 변환하거나 번역과 같은 입력 기반 작업은 그리디 디코딩이 더 적합할 수 있습니다. `do_sample=True`로 샘플링을 활성화할 수 있으며, 이 주제에 대한 자세한 내용은 이 [블로그 포스트](https://huggingface.co/blog/how-to-generate)에서 볼 수 있습니다. ```python >>> # Set seed or reproducibility -- you don't need this unless you want full reproducibility >>> from transformers import set_seed >>> set_seed(0) >>> model_inputs = tokenizer(["I am a cat."], return_tensors="pt").to("cuda") >>> # LLM + greedy decoding = repetitive, boring output >>> generated_ids = model.generate(**model_inputs) >>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0] 'I am a cat. I am a cat. I am a cat. I am a cat' >>> # With sampling, the output becomes more creative! >>> generated_ids = model.generate(**model_inputs, do_sample=True) >>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0] 'I am a cat.\nI just need to be. 
I am always.\nEvery time' ``` ### 잘못된 패딩 [[wrong-padding-side]] LLM은 [디코더 전용](https://huggingface.co/learn/nlp-course/chapter1/6?fw=pt) 구조를 가지고 있어, 입력 프롬프트에 대해 지속적으로 반복 처리를 합니다. 입력 데이터의 길이가 다르면 패딩 작업이 필요합니다. LLM은 패딩 토큰에서 작동을 이어가도록 설계되지 않았기 때문에, 입력 왼쪽에 패딩이 추가 되어야 합니다. 그리고 어텐션 마스크도 꼭 `generate` 함수에 전달되어야 합니다! ```python >>> # The tokenizer initialized above has right-padding active by default: the 1st sequence, >>> # which is shorter, has padding on the right side. Generation fails. >>> model_inputs = tokenizer( ... ["1, 2, 3", "A, B, C, D, E"], padding=True, return_tensors="pt" ... ).to("cuda") >>> generated_ids = model.generate(**model_inputs) >>> tokenizer.batch_decode(generated_ids[0], skip_special_tokens=True)[0] '' >>> # With left-padding, it works as expected! >>> tokenizer = AutoTokenizer.from_pretrained("openlm-research/open_llama_7b", padding_side="left") >>> tokenizer.pad_token = tokenizer.eos_token # Llama has no pad token by default >>> model_inputs = tokenizer( ... ["1, 2, 3", "A, B, C, D, E"], padding=True, return_tensors="pt" ... ).to("cuda") >>> generated_ids = model.generate(**model_inputs) >>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0] '1, 2, 3, 4, 5, 6,' ``` <!-- TODO: when the prompting guide is ready, mention the importance of setting the right prompt in this section --> ## 추가 자료 [[further-resources]] 자기회귀 생성 프로세스는 상대적으로 단순한 편이지만, LLM을 최대한 활용하려면 여러 가지 요소를 고려해야 하므로 쉽지 않을 수 있습니다. LLM에 대한 더 깊은 이해와 활용을 위한 다음 단계는 아래와 같습니다: <!-- TODO: complete with new guides --> ### 고급 생성 사용 [[advanced-generate-usage]] 1. [가이드](generation_strategies)는 다양한 생성 방법을 제어하는 방법, 생성 설정 파일을 설정하는 방법, 출력을 스트리밍하는 방법에 대해 설명합니다. 2. [`~generation.GenerationConfig`]와 [`~generation.GenerationMixin.generate`], [generate-related classes](internal/generation_utils)를 참조해보세요. ### LLM 리더보드 [[llm-leaderboards]] 1. [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)는 오픈 소스 모델의 품질에 중점을 둡니다. 2. [Open LLM-Perf Leaderboard](https://huggingface.co/spaces/optimum/llm-perf-leaderboard)는 LLM 처리량에 중점을 둡니다. ### 지연 시간 및 처리량 [[latency-and-throughput]] 1. 메모리 요구 사항을 줄이려면, 동적 양자화에 대한 [가이드](main_classes/quantization)를 참조하세요. ### 관련 라이브러리 [[related-libraries]] 1. [`text-generation-inference`](https://github.com/huggingface/text-generation-inference)는 LLM을 위한 실제 운영 환경에 적합한 서버입니다. 2. [`optimum`](https://github.com/huggingface/optimum)은 특정 하드웨어 장치에서 LLM을 최적화하기 위해 🤗 Transformers를 확장한 것입니다.
transformers/docs/source/ko/llm_tutorial.md/0
{ "file_path": "transformers/docs/source/ko/llm_tutorial.md", "repo_id": "transformers", "token_count": 8185 }
# AltCLIP ## 개요[[overview]] AltCLIP 모델은 Zhongzhi Chen, Guang Liu, Bo-Wen Zhang, Fulong Ye, Qinghong Yang, Ledell Wu의 [AltCLIP: Altering the Language Encoder in CLIP for Extended Language Capabilities](https://arxiv.org/abs/2211.06679v2) 논문에서 제안되었습니다. AltCLIP(CLIP의 언어 인코더를 변경하여 언어 기능 확장)은 다양한 이미지-텍스트 및 텍스트-텍스트 쌍으로 훈련된 신경망입니다. CLIP의 텍스트 인코더를 사전 훈련된 다국어 텍스트 인코더 XLM-R로 교체하여, 거의 모든 작업에서 CLIP과 유사한 성능을 얻을 수 있었으며, 원래 CLIP의 다국어 이해와 같은 기능도 확장되었습니다. 논문의 초록은 다음과 같습니다: *본 연구에서는 강력한 이중 언어 멀티모달 표현 모델을 훈련하는 개념적으로 간단하고 효과적인 방법을 제시합니다. OpenAI에서 출시한 사전 훈련된 멀티모달 표현 모델 CLIP에서 시작하여, 그 텍스트 인코더를 사전 훈련된 다국어 텍스트 인코더 XLM-R로 교체하고, 교사 학습과 대조 학습으로 구성된 2단계 훈련 스키마를 통해 언어와 이미지 표현을 정렬했습니다. 우리는 광범위한 작업 평가를 통해 우리의 방법을 검증했습니다. ImageNet-CN, Flicker30k-CN, COCO-CN을 포함한 여러 작업에서 새로운 최고 성능을 달성했으며, 거의 모든 작업에서 CLIP과 유사한 성능을 얻었습니다. 이는 CLIP의 텍스트 인코더를 단순히 변경하여 다국어 이해와 같은 확장 기능을 얻을 수 있음을 시사합니다.* 이 모델은 [jongjyh](https://huggingface.co/jongjyh)에 의해 기여되었습니다. ## 사용 팁과 예제[[usage-tips-and-example]] AltCLIP의 사용법은 CLIP과 매우 유사하며, 차이점은 텍스트 인코더에 있습니다. 일반적인 어텐션 대신 양방향 어텐션을 사용하며, XLM-R의 [CLS] 토큰을 사용하여 텍스트 임베딩을 나타냅니다. AltCLIP은 멀티모달 비전 및 언어 모델입니다. 이미지와 텍스트 간의 유사성 계산 및 제로샷 이미지 분류에 사용할 수 있습니다. AltCLIP은 ViT와 같은 트랜스포머를 사용하여 시각적 특징을 얻고, 양방향 언어 모델을 사용하여 텍스트 특징을 얻습니다. 이후 텍스트와 시각적 특징 모두 동일한 차원의 잠재 공간으로 투사됩니다. 투사된 이미지와 텍스트 특징 간의 내적을 유사도 점수로 사용합니다. 이미지를 트랜스포머 인코더에 입력하기 위해, 각 이미지를 일정한 크기의 겹치지 않는 패치 시퀀스로 분할한 뒤, 이를 선형 임베딩합니다. 전체 이미지를 나타내기 위해 [CLS] 토큰이 추가됩니다. 저자들은 절대 위치 임베딩도 추가하여 결과 벡터 시퀀스를 표준 트랜스포머 인코더에 입력합니다. [`CLIPImageProcessor`]는 모델을 위해 이미지를 크기 조정하고 정규화하는 데 사용할 수 있습니다. [`AltCLIPProcessor`]는 [`CLIPImageProcessor`]와 [`XLMRobertaTokenizer`]를 하나의 인스턴스로 묶어 텍스트를 인코딩하고 이미지를 준비합니다. 다음 예제는 [`AltCLIPProcessor`]와 [`AltCLIPModel`]을 사용하여 이미지와 텍스트 간의 유사성 점수를 얻는 방법을 보여줍니다. ```python >>> from PIL import Image >>> import requests >>> from transformers import AltCLIPModel, AltCLIPProcessor >>> model = AltCLIPModel.from_pretrained("BAAI/AltCLIP") >>> processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True) >>> outputs = model(**inputs) >>> logits_per_image = outputs.logits_per_image # 이미지-텍스트 유사도 점수 >>> probs = logits_per_image.softmax(dim=1) # 라벨 마다 확률을 얻기 위해 softmax 적용 ``` <Tip> 이 모델은 `CLIPModel`을 기반으로 하므로, 원래 CLIP처럼 사용할 수 있습니다. </Tip> ## AltCLIPConfig [[autodoc]] AltCLIPConfig - from_text_vision_configs ## AltCLIPTextConfig [[autodoc]] AltCLIPTextConfig ## AltCLIPVisionConfig [[autodoc]] AltCLIPVisionConfig ## AltCLIPProcessor [[autodoc]] AltCLIPProcessor ## AltCLIPModel [[autodoc]] AltCLIPModel - forward - get_text_features - get_image_features ## AltCLIPTextModel [[autodoc]] AltCLIPTextModel - forward ## AltCLIPVisionModel [[autodoc]] AltCLIPVisionModel - forward
transformers/docs/source/ko/model_doc/altclip.md/0
{ "file_path": "transformers/docs/source/ko/model_doc/altclip.md", "repo_id": "transformers", "token_count": 3274 }
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # DBRX[[dbrx]] ## 개요[[overview]] DBRX는 [트랜스포머 기반의](https://www.isattentionallyouneed.com/) 다음 토큰을 예측하는 디코더 전용 LLM 모델입니다. 총 132B 매개변수를 가진 *세밀한* 전문가 혼합(MoE) 아키텍처를 사용하며, 이 중 36B 매개변수가 입력마다 활성화됩니다. 12T 토큰의 텍스트와 코드 데이터로 사전 학습되었습니다. Mixtral-8x7B와 Grok-1과 같은 다른 공개 MoE 모델들과 비교했을 때, DBRX는 더 많은 수의 작은 전문가들을 사용하는 세밀한 구조를 가지고 있습니다. DBRX는 16개의 전문가 중 4개를 선택하는 반면, Mixtral-8x7B와 Grok-1은 8개의 전문가 중 2개를 선택합니다. 이는 65배 더 많은 전문가 조합을 가능하게 하며, 이를 통해 모델의 품질이 향상되는 것을 발견했습니다. DBRX는 회전 위치 인코딩(RoPE), 게이트 선형 유닛(GLU), 그룹 쿼리 어텐션(GQA)을 사용합니다. BPE 기반 모델이며 [tiktoken](https://github.com/openai/tiktoken) 저장소에 설명된 GPT-4 토크나이저를 사용합니다. 이러한 선택들은 철저한 평가와 스케일링 실험을 기반으로 이루어졌습니다. DBRX는 신중하게 선별된 12T 토큰의 데이터로 사전 학습되었으며, 최대 문맥 길이는 32K 토큰입니다. 이 데이터는 토큰 대비 MPT 계열 모델 학습에 사용된 데이터보다 최소 2배 이상 더 좋은 것으로 추정됩니다. 이 새로운 데이터셋은 데이터 처리를 위한 Apache Spark™와 Databricks 노트북, 그리고 데이터 관리와 거버넌스를 위한 Unity Catalog를 포함한 Databricks 도구 전체를 활용하여 개발되었습니다. 우리는 사전 학습을 위해 커리큘럼 학습을 사용했으며, 학습 중 데이터 믹스를 변경하는 방식이 모델 품질을 상당히 개선한다는 것을 발견했습니다. DBRX Instruct와 DBRX Base에 대한 더 자세한 정보는 이 [기술 블로그 포스트](https://www.databricks.com/blog/introducing-dbrx-new-state-art-open-llm)에서 확인할 수 있습니다. 이 모델은 [eitan-turok](https://huggingface.co/eitanturok)와 [abhi-db](https://huggingface.co/abhi-db)가 기여했습니다. 원본 코드는 [이곳](https://github.com/databricks/dbrx-instruct)에서 찾을 수 있지만, 최신 버전이 아닐 수 있습니다. ## 사용 예[[usage-examples]] `generate()` 메소드는 DBRX를 사용하여 텍스트를 생성하는 데 사용될 수 있습니다. 표준 어텐션 구현, 플래시 어텐션, PyTorch의 스케일된 내적 어텐션(Scaled Dot-Product Attention)을 사용하여 생성할 수 있습니다. 후자의 두 어텐션 구현 방식은 처리 속도를 크게 높여줍니다. ```python from transformers import DbrxForCausalLM, AutoTokenizer import torch tokenizer = AutoTokenizer.from_pretrained("databricks/dbrx-instruct", token="YOUR_HF_TOKEN") model = DbrxForCausalLM.from_pretrained( "databricks/dbrx-instruct", device_map="auto", torch_dtype=torch.bfloat16, token="YOUR_HF_TOKEN", ) input_text = "What does it take to build a great LLM?" messages = [{"role": "user", "content": input_text}] input_ids = tokenizer.apply_chat_template(messages, return_dict=True, tokenize=True, add_generation_prompt=True, return_tensors="pt").to("cuda") outputs = model.generate(**input_ids, max_new_tokens=200) print(tokenizer.decode(outputs[0])) ``` `pip install flash-attn`를 통해 플래시 어텐션을 설치하면, 더 빠른 생성이 가능합니다. (플래시 어텐션에 대한 HuggingFace 문서는 [이곳](https://huggingface.co/docs/transformers/perf_infer_gpu_one#flashattention-2)에서 확인할 수 있습니다.) ```python from transformers import DbrxForCausalLM, AutoTokenizer import torch tokenizer = AutoTokenizer.from_pretrained("databricks/dbrx-instruct", token="YOUR_HF_TOKEN") model = DbrxForCausalLM.from_pretrained( "databricks/dbrx-instruct", device_map="auto", torch_dtype=torch.bfloat16, token="YOUR_HF_TOKEN", attn_implementation="flash_attention_2", ) input_text = "What does it take to build a great LLM?" 
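# DBRX Instruct는 지시(채팅) 형식의 입력을 기대하므로, 프롬프트를 메시지 목록으로 감싸 apply_chat_template로 입력 형식을 맞춥니다.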
messages = [{"role": "user", "content": input_text}] input_ids = tokenizer.apply_chat_template(messages, return_dict=True, tokenize=True, add_generation_prompt=True, return_tensors="pt").to("cuda") outputs = model.generate(**input_ids, max_new_tokens=200) print(tokenizer.decode(outputs[0])) ``` PyTorch의 스케일된 내적 어텐션을 사용하여도 더 빠른 생성이 가능합니다. (스케일된 내적 어텐션에 대한 HuggingFace 문서는 [이곳](https://huggingface.co/docs/transformers/perf_infer_gpu_one#pytorch-scaled-dot-product-attention)에서 확인할 수 있습니다.) ```python from transformers import DbrxForCausalLM, AutoTokenizer import torch tokenizer = AutoTokenizer.from_pretrained("databricks/dbrx-instruct", token="YOUR_HF_TOKEN") model = DbrxForCausalLM.from_pretrained( "databricks/dbrx-instruct", device_map="auto", torch_dtype=torch.bfloat16, token="YOUR_HF_TOKEN", attn_implementation="sdpa", ) input_text = "What does it take to build a great LLM?" messages = [{"role": "user", "content": input_text}] input_ids = tokenizer.apply_chat_template(messages, return_dict=True, tokenize=True, add_generation_prompt=True, return_tensors="pt").to("cuda") outputs = model.generate(**input_ids, max_new_tokens=200) print(tokenizer.decode(outputs[0])) ``` ## DbrxConfig[[transformers.DbrxConfig]] [[autodoc]] DbrxConfig ## DbrxModel[[transformers.DbrxModel]] [[autodoc]] DbrxModel - forward ## DbrxForCausalLM[[transformers.DbrxForCausalLM]] [[autodoc]] DbrxForCausalLM - forward
transformers/docs/source/ko/model_doc/dbrx.md/0
{ "file_path": "transformers/docs/source/ko/model_doc/dbrx.md", "repo_id": "transformers", "token_count": 3617 }
<!--Copyright 2023 Mistral AI and The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See thze License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Mistral[[mistral]] ## 개요[[overview]] 미스트랄은 Albert Jiang, Alexandre Sablayrolles, Arthur Mensch, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Florian Bressand, Gianna Lengyel, Guillaume Lample, Lélio Renard Lavaud, Lucile Saulnier, Marie-Anne Lachaux, Pierre Stock, Teven Le Scao, Thibaut Lavril, Thomas Wang, Timothée Lacroix, William El Sayed가 작성한 [이 블로그 포스트](https://mistral.ai/news/announcing-mistral-7b/)에서 소개되었습니다. 블로그 포스트의 서두는 다음과 같습니다: *미스트랄 AI팀은 현존하는 언어 모델 중 크기 대비 가장 강력한 미스트랄7B를 출시하게 되어 자랑스럽습니다.* 미스트랄-7B는 [mistral.ai](https://mistral.ai/)에서 출시한 첫 번째 대규모 언어 모델(LLM)입니다. ### 아키텍처 세부사항[[architectural-details]] 미스트랄-7B는 다음과 같은 구조적 특징을 가진 디코더 전용 트랜스포머입니다: - 슬라이딩 윈도우 어텐션: 8k 컨텍스트 길이와 고정 캐시 크기로 훈련되었으며, 이론상 128K 토큰의 어텐션 범위를 가집니다. - GQA(Grouped Query Attention): 더 빠른 추론이 가능하고 더 작은 크기의 캐시를 사용합니다. - 바이트 폴백(Byte-fallback) BPE 토크나이저: 문자들이 절대 어휘 목록 외의 토큰으로 매핑되지 않도록 보장합니다. 더 자세한 내용은 [출시 블로그 포스트](https://mistral.ai/news/announcing-mistral-7b/)를 참조하세요. ### 라이선스[[license]] `미스트랄-7B`는 아파치 2.0 라이선스로 출시되었습니다. ## 사용 팁[[usage-tips]] 미스트랄 AI팀은 다음 3가지 체크포인트를 공개했습니다: - 기본 모델인 [미스트랄-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1)은 인터넷 규모의 데이터에서 다음 토큰을 예측하도록 사전 훈련되었습니다. - 지시 조정 모델인 [미스트랄-7B-Instruct-v0.1](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1)은 지도 미세 조정(SFT)과 직접 선호도 최적화(DPO)를 사용한 채팅에 최적화된 기본 모델입니다. - 개선된 지시 조정 모델인 [미스트랄-7B-Instruct-v0.2](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2)는 v1을 개선한 버전입니다. 기본 모델은 다음과 같이 사용할 수 있습니다: ```python >>> from transformers import AutoModelForCausalLM, AutoTokenizer >>> model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1", device_map="auto") >>> tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1") >>> prompt = "My favourite condiment is" >>> model_inputs = tokenizer([prompt], return_tensors="pt").to("cuda") >>> model.to(device) >>> generated_ids = model.generate(**model_inputs, max_new_tokens=100, do_sample=True) >>> tokenizer.batch_decode(generated_ids)[0] "My favourite condiment is to ..." ``` 지시 조정 모델은 다음과 같이 사용할 수 있습니다: ```python >>> from transformers import AutoModelForCausalLM, AutoTokenizer >>> model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-Instruct-v0.2", device_map="auto") >>> tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.2") >>> messages = [ ... {"role": "user", "content": "What is your favourite condiment?"}, ... {"role": "assistant", "content": "Well, I'm quite partial to a good squeeze of fresh lemon juice. It adds just the right amount of zesty flavour to whatever I'm cooking up in the kitchen!"}, ... {"role": "user", "content": "Do you have mayonnaise recipes?"} ... 
] >>> model_inputs = tokenizer.apply_chat_template(messages, return_tensors="pt").to("cuda") >>> generated_ids = model.generate(model_inputs, max_new_tokens=100, do_sample=True) >>> tokenizer.batch_decode(generated_ids)[0] "Mayonnaise can be made as follows: (...)" ``` 지시 조정 모델은 입력이 올바른 형식으로 준비되도록 [채팅 템플릿](../chat_templating)을 적용해야 합니다. ## 플래시 어텐션을 이용한 미스트랄 속도향상[[speeding-up-mistral-by-using-flash-attention]] 위의 코드 스니펫들은 어떤 최적화 기법도 사용하지 않은 추론 과정을 보여줍니다. 하지만 모델 내부에서 사용되는 어텐션 메커니즘의 더 빠른 구현인 [플래시 어텐션2](../perf_train_gpu_one.md#flash-attention-2)을 활용하면 모델의 속도를 크게 높일 수 있습니다. 먼저, 슬라이딩 윈도우 어텐션 기능을 포함하는 플래시 어텐션2의 최신 버전을 설치해야 합니다. ```bash pip install -U flash-attn --no-build-isolation ``` 하드웨어와 플래시 어텐션2의 호환여부를 확인하세요. 이에 대한 자세한 내용은 [플래시 어텐션 저장소](https://github.com/Dao-AILab/flash-attention)의 공식 문서에서 확인할 수 있습니다. 또한 모델을 반정밀도(예: `torch.float16`)로 불러와야합니다. 플래시 어텐션2를 사용하여 모델을 불러오고 실행하려면 아래 코드 스니펫을 참조하세요: ```python >>> import torch >>> from transformers import AutoModelForCausalLM, AutoTokenizer >>> model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1", torch_dtype=torch.float16, attn_implementation="flash_attention_2", device_map="auto") >>> tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1") >>> prompt = "My favourite condiment is" >>> model_inputs = tokenizer([prompt], return_tensors="pt").to("cuda") >>> model.to(device) >>> generated_ids = model.generate(**model_inputs, max_new_tokens=100, do_sample=True) >>> tokenizer.batch_decode(generated_ids)[0] "My favourite condiment is to (...)" ``` ### 기대하는 속도 향상[[expected-speedups]] 다음은 `mistralai/Mistral-7B-v0.1` 체크포인트를 사용한 트랜스포머의 기본 구현과 플래시 어텐션2 버전 모델 사이의 순수 추론 시간을 비교한 예상 속도 향상 다이어그램입니다. <div style="text-align: center"> <img src="https://huggingface.co/datasets/ybelkada/documentation-images/resolve/main/mistral-7b-inference-large-seqlen.png"> </div> ### 슬라이딩 윈도우 어텐션[[sliding-window-attention]] 현재 구현은 슬라이딩 윈도우 어텐션 메커니즘과 메모리 효율적인 캐시 관리 기능을 지원합니다. 슬라이딩 윈도우 어텐션을 활성화하려면, 슬라이딩 윈도우 어텐션과 호환되는`flash-attn`(`>=2.3.0`)버전을 사용하면 됩니다. 또한 플래시 어텐션2 모델은 더 메모리 효율적인 캐시 슬라이싱 메커니즘을 사용합니다. 미스트랄 모델의 공식 구현에서 권장하는 롤링 캐시 메커니즘을 따라, 캐시 크기를 고정(`self.config.sliding_window`)으로 유지하고, `padding_side="left"`인 경우에만 배치 생성(batch generation)을 지원하며, 현재 토큰의 절대 위치를 사용해 위치 임베딩을 계산합니다. ## 양자화로 미스트랄 크기 줄이기[[shrinking-down-mistral-using-quantization]] 미스트랄 모델은 70억 개의 파라미터를 가지고 있어, 절반의 정밀도(float16)로 약 14GB의 GPU RAM이 필요합니다. 각 파라미터가 2바이트로 저장되기 때문입니다. 하지만 [양자화](../quantization.md)를 사용하면 모델 크기를 줄일 수 있습니다. 모델을 4비트(즉, 파라미터당 반 바이트)로 양자화하면 약 3.5GB의 RAM만 필요합니다. 모델을 양자화하는 것은 `quantization_config`를 모델에 전달하는 것만큼 간단합니다. 아래에서는 BitsAndBytes 양자화를 사용하지만, 다른 양자화 방법은 [이 페이지](../quantization.md)를 참고하세요: ```python >>> import torch >>> from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig >>> # specify how to quantize the model >>> quantization_config = BitsAndBytesConfig( ... load_in_4bit=True, ... bnb_4bit_quant_type="nf4", ... bnb_4bit_compute_dtype="torch.float16", ... ) >>> model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-Instruct-v0.2", quantization_config=True, device_map="auto") >>> tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.2") >>> prompt = "My favourite condiment is" >>> messages = [ ... {"role": "user", "content": "What is your favourite condiment?"}, ... {"role": "assistant", "content": "Well, I'm quite partial to a good squeeze of fresh lemon juice. It adds just the right amount of zesty flavour to whatever I'm cooking up in the kitchen!"}, ... 
{"role": "user", "content": "Do you have mayonnaise recipes?"} ... ] >>> model_inputs = tokenizer.apply_chat_template(messages, return_tensors="pt").to("cuda") >>> generated_ids = model.generate(model_inputs, max_new_tokens=100, do_sample=True) >>> tokenizer.batch_decode(generated_ids)[0] "The expected output" ``` 이 모델은 [Younes Belkada](https://huggingface.co/ybelkada)와 [Arthur Zucker](https://huggingface.co/ArthurZ)가 기여했습니다. 원본 코드는 [이곳](https://github.com/mistralai/mistral-src)에서 확인할 수 있습니다. ## 리소스[[resources]] 미스트랄을 시작하는 데 도움이 되는 Hugging Face와 community 자료 목록(🌎로 표시됨) 입니다. 여기에 포함될 자료를 제출하고 싶으시다면 PR(Pull Request)를 열어주세요. 리뷰해 드리겠습니다! 자료는 기존 자료를 복제하는 대신 새로운 내용을 담고 있어야 합니다. <PipelineTag pipeline="text-generation"/> - 미스트랄-7B의 지도형 미세조정(SFT)을 수행하는 데모 노트북은 [이곳](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/Mistral/Supervised_fine_tuning_(SFT)_of_an_LLM_using_Hugging_Face_tooling.ipynb)에서 확인할 수 있습니다. 🌎 - 2024년에 Hugging Face 도구를 사용해 LLM을 미세 조정하는 방법에 대한 [블로그 포스트](https://www.philschmid.de/fine-tune-llms-in-2024-with-trl). 🌎 - Hugging Face의 [정렬(Alignment) 핸드북](https://github.com/huggingface/alignment-handbook)에는 미스트랄-7B를 사용한 지도형 미세 조정(SFT) 및 직접 선호 최적화(DPO)를 수행하기 위한 스크립트와 레시피가 포함되어 있습니다. 여기에는 단일 GPU에서 QLoRa 및 다중 GPU를 사용한 전체 미세 조정을 위한 스크립트가 포함되어 있습니다. - [인과적 언어 모델링 작업 가이드](../tasks/language_modeling) ## MistralConfig[[transformers.MistralConfig]] [[autodoc]] MistralConfig ## MistralModel[[transformers.MistralModel]] [[autodoc]] MistralModel - forward ## MistralForCausalLM[[transformers.MistralForCausalLM]] [[autodoc]] MistralForCausalLM - forward ## MistralForSequenceClassification[[transformers.MistralForSequenceClassification]] [[autodoc]] MistralForSequenceClassification - forward ## MistralForTokenClassification[[transformers.MistralForTokenClassification]] [[autodoc]] MistralForTokenClassification - forward ## FlaxMistralModel[[transformers.FlaxMistralModel]] [[autodoc]] FlaxMistralModel - __call__ ## FlaxMistralForCausalLM[[transformers.FlaxMistralForCausalLM]] [[autodoc]] FlaxMistralForCausalLM - __call__ ## TFMistralModel[[transformers.TFMistralModel]] [[autodoc]] TFMistralModel - call ## TFMistralForCausalLM[[transformers.TFMistralForCausalLM]] [[autodoc]] TFMistralForCausalLM - call ## TFMistralForSequenceClassification[[transformers.TFMistralForSequenceClassification]] [[autodoc]] TFMistralForSequenceClassification - call
transformers/docs/source/ko/model_doc/mistral.md/0
{ "file_path": "transformers/docs/source/ko/model_doc/mistral.md", "repo_id": "transformers", "token_count": 7040 }
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 모델 공유하기[[share-a-model]] 지난 두 튜토리얼에서 분산 설정을 위해 PyTorch, Keras 및 🤗 Accelerate를 사용하여 모델을 미세 조정하는 방법을 보았습니다. 다음 단계는 모델을 커뮤니티와 공유하는 것입니다! Hugging Face는 인공지능의 민주화를 위해 모두에게 지식과 자원을 공개적으로 공유해야 한다고 믿습니다. 다른 사람들이 시간과 자원을 절약할 수 있도록 커뮤니티에 모델을 공유하는 것을 고려해 보세요. 이 튜토리얼에서 [Model Hub](https://huggingface.co/models)에서 훈련되거나 미세 조정 모델을 공유하는 두 가지 방법에 대해 알아봅시다: - API를 통해 파일을 Hub에 푸시합니다. - 웹사이트를 통해 파일을 Hub로 끌어다 놓습니다. <iframe width="560" height="315" src="https://www.youtube.com/embed/XvSGPZFEjDY" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> <Tip> 커뮤니티에 모델을 공유하려면, [huggingface.co](https://huggingface.co/join)에 계정이 필요합니다. 기존 조직에 가입하거나 새로 만들 수도 있습니다. </Tip> ## 저장소 특징[[repository-features]] 모델 허브의 각 저장소는 일반적인 GitHub 저장소처럼 작동합니다. 저장소는 버전 관리, 커밋 기록, 차이점 시각화 기능을 제공합니다. 모델 허브에 내장된 버전 관리는 git 및 [git-lfs](https://git-lfs.github.com/)를 기반으로 합니다. 즉, 하나의 모델을 하나의 저장소로 취급하여 접근 제어 및 확장성이 향상됩니다. 버전 제어는 커밋 해시, 태그 또는 브랜치로 모델의 특정 버전을 고정하는 방법인 *revision*을 허용합니다. 따라서 `revision` 매개변수를 사용하여 특정 모델 버전을 가져올 수 있습니다: ```py >>> model = AutoModel.from_pretrained( ... "julien-c/EsperBERTo-small", revision="4c77982" # tag name, or branch name, or commit hash ... ) ``` 또한 저장소에서 파일을 쉽게 편집할 수 있으며, 커밋 기록과 차이를 볼 수 있습니다: ![vis_diff](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/vis_diff.png) ## 설정[[setup]] 모델을 허브에 공유하기 전에 Hugging Face 자격 증명이 필요합니다. 터미널에 액세스할 수 있는 경우, 🤗 Transformers가 설치된 가상 환경에서 다음 명령을 실행합니다. 그러면 Hugging Face 캐시 폴더(기본적으로 `~/.cache/`)에 액세스 토큰을 저장합니다: ```bash huggingface-cli login ``` Jupyter 또는 Colaboratory와 같은 노트북을 사용 중인 경우, [`huggingface_hub`](https://huggingface.co/docs/hub/adding-a-library) 라이브러리가 설치되었는지 확인하세요. 이 라이브러리를 사용하면 API로 허브와 상호 작용할 수 있습니다. ```bash pip install huggingface_hub ``` 그런 다음 `notebook_login`로 허브에 로그인하고, [여기](https://huggingface.co/settings/token) 링크에서 로그인할 토큰을 생성합니다: ```py >>> from huggingface_hub import notebook_login >>> notebook_login() ``` ## 프레임워크 간 모델 변환하기[[convert-a-model-for-all-frameworks]] 다른 프레임워크로 작업하는 사용자가 모델을 사용할 수 있도록 하려면, PyTorch 및 TensorFlow 체크포인트를 모두 사용하여 모델을 변환하고 업로드하는 것이 좋습니다. 이 단계를 건너뛰어도 사용자는 다른 프레임워크에서 모델을 가져올 수 있지만, 🤗 Transformers가 체크포인트를 즉석에서 변환해야 하므로 속도가 느려질 수 있습니다. 체크포인트를 다른 프레임워크로 변환하는 것은 쉽습니다. PyTorch 및 TensorFlow가 설치되어 있는지 확인한 다음(설치 지침은 [여기](installation) 참조) 다른 프레임워크에서 작업에 대한 특정 모델을 찾습니다. 
<frameworkcontent> <pt> 체크포인트를 TensorFlow에서 PyTorch로 변환하려면 `from_tf=True`를 지정하세요: ```py >>> pt_model = DistilBertForSequenceClassification.from_pretrained("path/to/awesome-name-you-picked", from_tf=True) >>> pt_model.save_pretrained("path/to/awesome-name-you-picked") ``` </pt> <tf> 체크포인트를 PyTorch에서 TensorFlow로 변환하려면 `from_pt=True`를 지정하세요: ```py >>> tf_model = TFDistilBertForSequenceClassification.from_pretrained("path/to/awesome-name-you-picked", from_pt=True) ``` 그런 다음 새로운 체크포인트와 함께 새로운 TensorFlow 모델을 저장할 수 있습니다: ```py >>> tf_model.save_pretrained("path/to/awesome-name-you-picked") ``` </tf> <jax> Flax에서 모델을 사용하는 경우, PyTorch에서 Flax로 체크포인트를 변환할 수도 있습니다: ```py >>> flax_model = FlaxDistilBertForSequenceClassification.from_pretrained( ... "path/to/awesome-name-you-picked", from_pt=True ... ) ``` </jax> </frameworkcontent> ## 훈련 중 모델 푸시하기[[push-a-model-during-training]] <frameworkcontent> <pt> <Youtube id="Z1-XMy-GNLQ"/> 모델을 허브에 공유하는 것은 추가 매개변수나 콜백을 추가하는 것만큼 간단합니다. [미세 조정 튜토리얼](training)에서 [`TrainingArguments`] 클래스는 하이퍼파라미터와 추가 훈련 옵션을 지정하는 곳이라는 것을 기억하세요. 이러한 훈련 옵션 중 하나는 모델을 허브로 직접 푸시하는 기능을 포함합니다. [`TrainingArguments`]에서 `push_to_hub=True`를 설정하세요: ```py >>> training_args = TrainingArguments(output_dir="my-awesome-model", push_to_hub=True) ``` 평소와 같이 훈련 인수를 [`Trainer`]에 전달합니다: ```py >>> trainer = Trainer( ... model=model, ... args=training_args, ... train_dataset=small_train_dataset, ... eval_dataset=small_eval_dataset, ... compute_metrics=compute_metrics, ... ) ``` 모델을 미세 조정한 후, [`Trainer`]에서 [`~transformers.Trainer.push_to_hub`]를 호출하여 훈련된 모델을 허브로 푸시하세요. 🤗 Transformers는 훈련 하이퍼파라미터, 훈련 결과 및 프레임워크 버전을 모델 카드에 자동으로 추가합니다! ```py >>> trainer.push_to_hub() ``` </pt> <tf> [`PushToHubCallback`]을 사용하여 모델을 허브에 공유하려면, [`PushToHubCallback`]에 다음 인수를 정의하세요: - 출력된 모델의 파일 경로 - 토크나이저 - `{Hub 사용자 이름}/{모델 이름}` 형식의 `hub_model_id` ```py >>> from transformers import PushToHubCallback >>> push_to_hub_callback = PushToHubCallback( ... output_dir="./your_model_save_path", tokenizer=tokenizer, hub_model_id="your-username/my-awesome-model" ... ) ``` [`fit`](https://keras.io/api/models/model_training_apis/)에 콜백을 추가하면, 🤗 Transformers가 훈련된 모델을 허브로 푸시합니다: ```py >>> model.fit(tf_train_dataset, validation_data=tf_validation_dataset, epochs=3, callbacks=push_to_hub_callback) ``` </tf> </frameworkcontent> ## `push_to_hub` 함수 사용하기[[use-the-pushtohub-function]] 모델에서 직접 `push_to_hub`를 호출하여 허브에 업로드할 수도 있습니다. `push_to_hub`에 모델 이름을 지정하세요: ```py >>> pt_model.push_to_hub("my-awesome-model") ``` 이렇게 하면 사용자 이름 아래에 모델 이름 `my-awesome-model`로 저장소가 생성됩니다. 이제 사용자는 `from_pretrained` 함수를 사용하여 모델을 가져올 수 있습니다: ```py >>> from transformers import AutoModel >>> model = AutoModel.from_pretrained("your_username/my-awesome-model") ``` 조직에 속하고 모델을 조직 이름으로 대신 푸시하려면 `repo_id`에 추가하세요: ```py >>> pt_model.push_to_hub("my-awesome-org/my-awesome-model") ``` `push_to_hub` 함수는 모델 저장소에 다른 파일을 추가하는 데에도 사용할 수 있습니다. 예를 들어 모델 저장소에 토크나이저를 추가할 수 있습니다: ```py >>> tokenizer.push_to_hub("my-awesome-model") ``` 또는 미세 조정된 PyTorch 모델의 TensorFlow 버전을 추가할 수도 있습니다: ```py >>> tf_model.push_to_hub("my-awesome-model") ``` 이제 Hugging Face 프로필로 이동하면, 새로 생성한 모델 저장소가 표시됩니다. **Files** 탭을 클릭하면 저장소에 업로드한 모든 파일이 표시됩니다. 저장소에 파일을 만들고 업로드하는 방법에 대한 자세한 내용은 허브 설명서 [여기](https://huggingface.co/docs/hub/how-to-upstream)를 참조하세요. ## 웹 인터페이스로 업로드하기[[upload-with-the-web-interface]] 코드 없는 접근 방식을 선호하는 사용자는 허브의 웹 인터페이스를 통해 모델을 업로드할 수 있습니다. 
[huggingface.co/new](https://huggingface.co/new)를 방문하여 새로운 저장소를 생성하세요: ![new_model_repo](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/new_model_repo.png) 여기서 모델에 대한 몇 가지 정보를 추가하세요: - 저장소의 **소유자**를 선택합니다. 이는 사용자 또는 사용자가 속한 조직일 수 있습니다. - 저장소 이름이 될 모델의 이름을 선택합니다. - 모델이 공개인지 비공개인지 선택합니다. - 모델의 라이센스 사용을 지정합니다. 이제 **Files** 탭을 클릭하고 **Add file** 버튼을 클릭하여 새로운 파일을 저장소에 업로드합니다. 그런 다음 업로드할 파일을 끌어다 놓고 커밋 메시지를 추가하세요. ![upload_file](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/upload_file.png) ## 모델 카드 추가하기[[add-a-model-card]] 사용자가 모델의 기능, 제한, 잠재적 편향 및 윤리적 고려 사항을 이해할 수 있도록 저장소에 모델 카드를 추가하세요. 모델 카드는 `README.md` 파일에 정의되어 있습니다. 다음 방법으로 모델 카드를 추가할 수 있습니다: * `README.md` 파일을 수동으로 생성하여 업로드합니다. * 모델 저장소에서 **Edit model card** 버튼을 클릭합니다. 모델 카드에 포함할 정보 유형에 대한 좋은 예는 DistilBert [모델 카드](https://huggingface.co/distilbert/distilbert-base-uncased)를 참조하세요. 모델의 탄소 발자국이나 위젯 예시 등 `README.md` 파일에서 제어할 수 있는 다른 옵션에 대한 자세한 내용은 [여기](https://huggingface.co/docs/hub/models-cards) 문서를 참조하세요.
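참고로, 직접 작성한 `README.md` 파일은 웹 인터페이스뿐만 아니라 API로도 업로드할 수 있습니다. 아래는 `huggingface_hub`의 `upload_file` 함수를 사용하는 간단한 스케치이며, 파일 경로와 저장소 이름(`your_username/my-awesome-model`)은 설명을 위한 가정일 뿐이므로 환경에 맞게 바꿔야 합니다:

```py
>>> from huggingface_hub import HfApi

>>> api = HfApi()
>>> # 로컬에서 작성한 모델 카드(README.md)를 저장소의 최상위 경로에 커밋합니다
>>> api.upload_file(
...     path_or_fileobj="./README.md",
...     path_in_repo="README.md",
...     repo_id="your_username/my-awesome-model",
...     commit_message="Add model card",
... )
```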
transformers/docs/source/ko/model_sharing.md/0
{ "file_path": "transformers/docs/source/ko/model_sharing.md", "repo_id": "transformers", "token_count": 7560 }
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 이념과 목표 [[philosophy]] 🤗 Transformers는 다음과 같은 목적으로 만들어진 독자적인 라이브러리입니다: - 대규모 Transformers 모델을 사용하거나 연구하거나 확장하려는 기계 학습 연구원 및 교육자를 위한 것입니다. - 모델을 미세 조정하거나 제작용으로 사용하고자 하는 실전 개발자를 위한 것입니다. - 특정 기계 학습 작업을 해결하기 위해 사전훈련된 모델을 다운로드하고 사용하기만 하려는 엔지니어를 위한 것입니다. 이 라이브러리는 두 가지 주요 목표를 가지고 설계되었습니다: 1. 사용하기 쉽고 빠르게 만드는 것: - 학습해야 할 사용자 대상 추상화의 수를 제한했습니다. 실제로 거의 추상화가 없으며, 각 모델을 사용하기 위해 필요한 세 가지 표준 클래스인 [configuration](main_classes/configuration), [models](main_classes/model) 및 전처리 클래스인 ([tokenizer](main_classes/tokenizer)는 NLP용, [image processor](main_classes/image_processor)는 비전용, [feature extractor](main_classes/feature_extractor)는 오디오용, [processor](main_classes/processors)는 멀티모달 입력용)만 사용합니다. - 이러한 클래스는 공통적인 `from_pretrained()` 메서드를 사용하여 미리 훈련된 인스턴스에서 간단하고 통일된 방식으로 초기화할 수 있습니다. 이 메소드는 미리 훈련된 체크포인트에서 관련 클래스 인스턴스와 관련 데이터(구성의 하이퍼파라미터, 토크나이저의 어휘, 모델의 가중치)를 (필요한 경우) 다운로드하고 캐시하며 가져옵니다. 체크포인트는 [Hugging Face Hub](https://huggingface.co/models)에서 제공되거나 사용자 자체의 저장된 체크포인트에서 제공됩니다. - 이 세 가지 기본 클래스 위에 라이브러리는 [`pipeline`] API를 제공하여 주어진 작업에 대해 모델을 빠르게 추론하는 데 사용하고, [`Trainer`]를 제공하여 PyTorch 모델을 빠르게 훈련하거나 미세 조정할 수 있도록 합니다(모든 TensorFlow 모델은 `Keras.fit`과 호환됩니다). - 결과적으로, 이 라이브러리는 신경망을 구축하기 위한 모듈식 도구 상자가 아닙니다. 라이브러리를 확장하거나 구축하려면 일반적인 Python, PyTorch, TensorFlow, Keras 모듈을 사용하고 라이브러리의 기본 클래스를 상속하여 모델 로딩 및 저장과 같은 기능을 재사용하면 됩니다. 모델에 대한 코딩 철학에 대해 더 자세히 알고 싶다면 [Repeat Yourself](https://huggingface.co/blog/transformers-design-philosophy) 블로그 글을 확인해보세요. 2. 원래 모델과 가능한 한 근접한 성능을 제공하는 최신 모델을 제공하는 것: - 각 아키텍처에 대해 공식 저자가 제공한 결과를 재현하는 적어도 한 가지 예제를 제공합니다. - 코드는 원래 코드와 가능한 한 유사하게 유지되므로 PyTorch 코드는 TensorFlow 코드로 변환되어 *pytorchic*하지 않을 수 있고, 그 반대의 경우도 마찬가지입니다. 기타 목표 몇 가지: - 모델의 내부를 가능한 일관되게 노출시키기: - 전체 은닉 상태와 어텐션 가중치에 대한 액세스를 단일 API를 사용하여 제공합니다. - 전처리 클래스 및 기본 모델 API는 모델 간에 쉽게 전환할 수 있도록 표준화되어 있습니다. - 미세 조정 및 모델 탐색을 위한 유망한 도구들을 주관적으로 선택하기: - 미세 조정을 위해 어휘 및 임베딩에 새로운 토큰을 간단하고 일관된 방식으로 추가하는 방법을 제공합니다. - Transformer 헤드를 마스킹하고 가지치기하는 간단한 방법을 제공합니다. - PyTorch, TensorFlow 2.0 및 Flax 간에 쉽게 전환할 수 있도록 하여 하나의 프레임워크로 훈련하고 다른 프레임워크로 추론할 수 있게 합니다. ## 주요 개념 [[main-concepts]] 이 라이브러리는 각 모델에 대해 세 가지 유형의 클래스를 기반으로 구축되었습니다: - **모델 클래스**는 라이브러리에서 제공하는 사전 훈련된 가중치와 함께 작동하는 PyTorch 모델([torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module)), Keras 모델([tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model)), JAX/Flax 모델([flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html))일 수 있습니다. - **구성 클래스**는 모델을 구축하는 데 필요한 하이퍼파라미터(예: 레이어 수 및 은닉 크기)를 저장합니다. 구성 클래스를 직접 인스턴스화할 필요는 없습니다. 특히, 수정 없이 고 사전 학습된 모델을 사용하는 경우 모델을 생성하면 모델의 일부인 구성을 자동으로 인스턴스화됩니다. - **전처리 클래스**는 원시 데이터를 모델이 수용하는 형식으로 변환합니다. [Tokenizer](main_classes/tokenizer)는 각 모델의 어휘를 저장하고, 문자열을 토큰 임베딩 인덱스 리스트로 인코딩하고 디코딩하기 위한 메소드를 제공합니다. 
[Image processors](main_classes/image_processor)는 비전 입력을 전처리하고, [feature extractors](main_classes/feature_extractor)는 오디오 입력을 전처리하며, [processor](main_classes/processors)는 멀티모달 입력을 처리합니다. 모든 이러한 클래스는 사전 훈련된 인스턴스에서 인스턴스화하고 로컬로 저장하며, 세 가지 메소드를 사용하여 Hub에서 공유할 수 있습니다: - `from_pretrained()` 메소드를 사용하면 라이브러리 자체에서 제공하는 사전 훈련된 버전(지원되는 모델은 [Model Hub](https://huggingface.co/models)에서 찾을 수 있음)이나 사용자가 로컬로 저장한 경우(또는 서버에 저장한 경우)의 모델, 구성 및 전처리 클래스를 인스턴스화할 수 있습니다. - `save_pretrained()` 메소드를 사용하면 모델, 구성 및 전처리 클래스를 로컬로 저장하여 `from_pretrained()`를 사용하여 다시 가져올 수 있습니다. - `push_to_hub()` 메소드를 사용하면 모델, 구성 및 전처리 클래스를 Hub에 공유하여 모두에게 쉽게 접근할 수 있습니다.
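예를 들어, 이 세 가지 메소드가 함께 사용되는 모습을 간단히 나타내면 다음과 같습니다. 아래 코드는 설명을 위한 스케치이며, 체크포인트 이름과 저장소 이름은 예시로 가정한 값입니다:

```py
>>> from transformers import AutoModel, AutoTokenizer

>>> # 사전 훈련된 인스턴스로부터 인스턴스화합니다
>>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilbert-base-uncased")
>>> model = AutoModel.from_pretrained("distilbert/distilbert-base-uncased")

>>> # 로컬로 저장한 뒤, 같은 경로에서 다시 가져올 수 있습니다
>>> model.save_pretrained("./my-local-checkpoint")
>>> model = AutoModel.from_pretrained("./my-local-checkpoint")

>>> # 허브에 로그인되어 있다면 커뮤니티와 공유할 수도 있습니다 (저장소 이름은 예시입니다)
>>> # model.push_to_hub("your_username/my-local-checkpoint")
```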
transformers/docs/source/ko/philosophy.md/0
{ "file_path": "transformers/docs/source/ko/philosophy.md", "repo_id": "transformers", "token_count": 5165 }
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 오디오 분류[[audio_classification]] [[open-in-colab]] <Youtube id="KWwzcmG98Ds"/> 오디오 분류는 텍스트와 마찬가지로 입력 데이터에 클래스 레이블 출력을 할당합니다. 유일한 차이점은 텍스트 입력 대신 원시 오디오 파형이 있다는 것입니다. 오디오 분류의 실제 적용 분야에는 화자의 의도 파악, 언어 분류, 소리로 동물 종을 식별하는 것 등이 있습니다. 이 문서에서 방법을 알아보겠습니다: 1. [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) 데이터 세트를 [Wav2Vec2](https://huggingface.co/facebook/wav2vec2-base)로 미세 조정하여 화자의 의도를 분류합니다. 2. 추론에 미세 조정된 모델을 사용하세요. <Tip> 이 작업과 호환되는 모든 아키텍처와 체크포인트를 보려면 [작업 페이지](https://huggingface.co/tasks/audio-classification)를 확인하는 것이 좋습니다. </Tip> 시작하기 전에 필요한 라이브러리가 모두 설치되어 있는지 확인하세요: ```bash pip install transformers datasets evaluate ``` 모델을 업로드하고 커뮤니티와 공유할 수 있도록 허깅페이스 계정에 로그인하는 것이 좋습니다. 메시지가 표시되면 토큰을 입력하여 로그인합니다: ```py >>> from huggingface_hub import notebook_login >>> notebook_login() ``` ## MInDS-14 데이터셋 불러오기[[load_minds_14_dataset]] 먼저 🤗 Datasets 라이브러리에서 MinDS-14 데이터 세트를 가져옵니다: ```py >>> from datasets import load_dataset, Audio >>> minds = load_dataset("PolyAI/minds14", name="en-US", split="train") ``` 데이터 세트의 `train` 분할을 [`~datasets.Dataset.train_test_split`] 메소드를 사용하여 더 작은 훈련 및 테스트 집합으로 분할합니다. 이렇게 하면 전체 데이터 세트에 더 많은 시간을 소비하기 전에 모든 것이 작동하는지 실험하고 확인할 수 있습니다. ```py >>> minds = minds.train_test_split(test_size=0.2) ``` 이제 데이터 집합을 살펴볼게요: ```py >>> minds DatasetDict({ train: Dataset({ features: ['path', 'audio', 'transcription', 'english_transcription', 'intent_class', 'lang_id'], num_rows: 450 }) test: Dataset({ features: ['path', 'audio', 'transcription', 'english_transcription', 'intent_class', 'lang_id'], num_rows: 113 }) }) ``` 데이터 세트에는 `lang_id` 및 `english_transcription`과 같은 유용한 정보가 많이 포함되어 있지만 이 가이드에서는 `audio` 및 `intent_class`에 중점을 둘 것입니다. 다른 열은 [`~datasets.Dataset.remove_columns`] 메소드를 사용하여 제거합니다: ```py >>> minds = minds.remove_columns(["path", "transcription", "english_transcription", "lang_id"]) ``` 예시를 살펴보겠습니다: ```py >>> minds["train"][0] {'audio': {'array': array([ 0. , 0. , 0. , ..., -0.00048828, -0.00024414, -0.00024414], dtype=float32), 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~APP_ERROR/602b9a5fbb1e6d0fbce91f52.wav', 'sampling_rate': 8000}, 'intent_class': 2} ``` 두 개의 필드가 있습니다: - `audio`: 오디오 파일을 가져오고 리샘플링하기 위해 호출해야 하는 음성 신호의 1차원 `배열`입니다. - `intent_class`: 화자의 의도에 대한 클래스 ID를 나타냅니다. 모델이 레이블 ID에서 레이블 이름을 쉽게 가져올 수 있도록 레이블 이름을 정수로 매핑하는 사전을 만들거나 그 반대로 매핑하는 사전을 만듭니다: ```py >>> labels = minds["train"].features["intent_class"].names >>> label2id, id2label = dict(), dict() >>> for i, label in enumerate(labels): ... label2id[label] = str(i) ... 
id2label[str(i)] = label ``` 이제 레이블 ID를 레이블 이름으로 변환할 수 있습니다: ```py >>> id2label[str(2)] 'app_error' ``` ## 전처리[[preprocess]] 다음 단계는 오디오 신호를 처리하기 위해 Wav2Vec2 특징 추출기를 가져오는 것입니다: ```py >>> from transformers import AutoFeatureExtractor >>> feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base") ``` MinDS-14 데이터 세트의 샘플링 속도는 8khz이므로(이 정보는 [데이터세트 카드](https://huggingface.co/datasets/PolyAI/minds14)에서 확인할 수 있습니다), 사전 훈련된 Wav2Vec2 모델을 사용하려면 데이터 세트를 16kHz로 리샘플링해야 합니다: ```py >>> minds = minds.cast_column("audio", Audio(sampling_rate=16_000)) >>> minds["train"][0] {'audio': {'array': array([ 2.2098757e-05, 4.6582241e-05, -2.2803260e-05, ..., -2.8419291e-04, -2.3305941e-04, -1.1425107e-04], dtype=float32), 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~APP_ERROR/602b9a5fbb1e6d0fbce91f52.wav', 'sampling_rate': 16000}, 'intent_class': 2} ``` 이제 전처리 함수를 만듭니다: 1. 가져올 `오디오` 열을 호출하고 필요한 경우 오디오 파일을 리샘플링합니다. 2. 오디오 파일의 샘플링 속도가 모델에 사전 훈련된 오디오 데이터의 샘플링 속도와 일치하는지 확인합니다. 이 정보는 Wav2Vec2 [모델 카드](https://huggingface.co/facebook/wav2vec2-base)에서 확인할 수 있습니다. 3. 긴 입력이 잘리지 않고 일괄 처리되도록 최대 입력 길이를 설정합니다. ```py >>> def preprocess_function(examples): ... audio_arrays = [x["array"] for x in examples["audio"]] ... inputs = feature_extractor( ... audio_arrays, sampling_rate=feature_extractor.sampling_rate, max_length=16000, truncation=True ... ) ... return inputs ``` 전체 데이터 세트에 전처리 기능을 적용하려면 🤗 Datasets [`~datasets.Dataset.map`] 함수를 사용합니다. `batched=True`를 설정하여 데이터 집합의 여러 요소를 한 번에 처리하면 `map`의 속도를 높일 수 있습니다. 필요하지 않은 열을 제거하고 `intent_class`의 이름을 모델이 예상하는 이름인 `label`로 변경합니다: ```py >>> encoded_minds = minds.map(preprocess_function, remove_columns="audio", batched=True) >>> encoded_minds = encoded_minds.rename_column("intent_class", "label") ``` ## 평가하기[[evaluate]] 훈련 중에 메트릭을 포함하면 모델의 성능을 평가하는 데 도움이 되는 경우가 많습니다. 🤗 [Evaluate](https://huggingface.co/docs/evaluate/index) 라이브러리를 사용하여 평가 방법을 빠르게 가져올 수 있습니다. 이 작업에서는 [accuracy(정확도)](https://huggingface.co/spaces/evaluate-metric/accuracy) 메트릭을 가져옵니다(메트릭을 가져오고 계산하는 방법에 대한 자세한 내용은 🤗 Evalutate [빠른 둘러보기](https://huggingface.co/docs/evaluate/a_quick_tour) 참조하세요): ```py >>> import evaluate >>> accuracy = evaluate.load("accuracy") ``` 그런 다음 예측과 레이블을 [`~evaluate.EvaluationModule.compute`]에 전달하여 정확도를 계산하는 함수를 만듭니다: ```py >>> import numpy as np >>> def compute_metrics(eval_pred): ... predictions = np.argmax(eval_pred.predictions, axis=1) ... return accuracy.compute(predictions=predictions, references=eval_pred.label_ids) ``` 이제 `compute_metrics` 함수를 사용할 준비가 되었으며, 트레이닝을 설정할 때 이 함수를 사용합니다. ## 훈련[[train]] <frameworkcontent> <pt> <Tip> [`Trainer`]로 모델을 미세 조정하는 데 익숙하지 않다면 기본 튜토리얼 [여기](../training#train-with-pytorch-trainer)을 살펴보세요! </Tip> 이제 모델 훈련을 시작할 준비가 되었습니다! [`AutoModelForAudioClassification`]을 이용해서 Wav2Vec2를 불러옵니다. 예상되는 레이블 수와 레이블 매핑을 지정합니다: ```py >>> from transformers import AutoModelForAudioClassification, TrainingArguments, Trainer >>> num_labels = len(id2label) >>> model = AutoModelForAudioClassification.from_pretrained( ... "facebook/wav2vec2-base", num_labels=num_labels, label2id=label2id, id2label=id2label ... ) ``` 이제 세 단계만 남았습니다: 1. 훈련 하이퍼파라미터를 [`TrainingArguments`]에 정의합니다. 유일한 필수 매개변수는 모델을 저장할 위치를 지정하는 `output_dir`입니다. `push_to_hub = True`를 설정하여 이 모델을 허브로 푸시합니다(모델을 업로드하려면 허깅 페이스에 로그인해야 합니다). 각 에폭이 끝날 때마다 [`Trainer`]가 정확도를 평가하고 훈련 체크포인트를 저장합니다. 2. 모델, 데이터 세트, 토크나이저, 데이터 콜레이터, `compute_metrics` 함수와 함께 훈련 인자를 [`Trainer`]에 전달합니다. 3. [`~Trainer.train`]을 호출하여 모델을 미세 조정합니다. 
```py >>> training_args = TrainingArguments( ... output_dir="my_awesome_mind_model", ... eval_strategy="epoch", ... save_strategy="epoch", ... learning_rate=3e-5, ... per_device_train_batch_size=32, ... gradient_accumulation_steps=4, ... per_device_eval_batch_size=32, ... num_train_epochs=10, ... warmup_ratio=0.1, ... logging_steps=10, ... load_best_model_at_end=True, ... metric_for_best_model="accuracy", ... push_to_hub=True, ... ) >>> trainer = Trainer( ... model=model, ... args=training_args, ... train_dataset=encoded_minds["train"], ... eval_dataset=encoded_minds["test"], ... processing_class=feature_extractor, ... compute_metrics=compute_metrics, ... ) >>> trainer.train() ``` 훈련이 완료되면 모든 사람이 모델을 사용할 수 있도록 [`~transformers.Trainer.push_to_hub`] 메소드를 사용하여 모델을 허브에 공유하세요: ```py >>> trainer.push_to_hub() ``` </pt> </frameworkcontent> <Tip> For a more in-depth example of how to finetune a model for audio classification, take a look at the corresponding [PyTorch notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/audio_classification.ipynb). </Tip> ## 추론[[inference]] 이제 모델을 미세 조정했으니 추론에 사용할 수 있습니다! 추론을 실행할 오디오 파일을 가져옵니다. 필요한 경우 오디오 파일의 샘플링 속도를 모델의 샘플링 속도와 일치하도록 리샘플링하는 것을 잊지 마세요! ```py >>> from datasets import load_dataset, Audio >>> dataset = load_dataset("PolyAI/minds14", name="en-US", split="train") >>> dataset = dataset.cast_column("audio", Audio(sampling_rate=16000)) >>> sampling_rate = dataset.features["audio"].sampling_rate >>> audio_file = dataset[0]["audio"]["path"] ``` 추론을 위해 미세 조정한 모델을 시험해 보는 가장 간단한 방법은 [`pipeline`]에서 사용하는 것입니다. 모델을 사용하여 오디오 분류를 위한 `pipeline`을 인스턴스화하고 오디오 파일을 전달합니다: ```py >>> from transformers import pipeline >>> classifier = pipeline("audio-classification", model="stevhliu/my_awesome_minds_model") >>> classifier(audio_file) [ {'score': 0.09766869246959686, 'label': 'cash_deposit'}, {'score': 0.07998877018690109, 'label': 'app_error'}, {'score': 0.0781070664525032, 'label': 'joint_account'}, {'score': 0.07667109370231628, 'label': 'pay_bill'}, {'score': 0.0755252093076706, 'label': 'balance'} ] ``` 원하는 경우 `pipeline`의 결과를 수동으로 복제할 수도 있습니다: <frameworkcontent> <pt> 특징 추출기를 가져와서 오디오 파일을 전처리하고 `입력`을 PyTorch 텐서로 반환합니다: ```py >>> from transformers import AutoFeatureExtractor >>> feature_extractor = AutoFeatureExtractor.from_pretrained("stevhliu/my_awesome_minds_model") >>> inputs = feature_extractor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt") ``` 모델에 입력을 전달하고 로짓을 반환합니다: ```py >>> from transformers import AutoModelForAudioClassification >>> model = AutoModelForAudioClassification.from_pretrained("stevhliu/my_awesome_minds_model") >>> with torch.no_grad(): ... logits = model(**inputs).logits ``` 확률이 가장 높은 클래스를 가져온 다음 모델의 `id2label` 매핑을 사용하여 이를 레이블로 변환합니다: ```py >>> import torch >>> predicted_class_ids = torch.argmax(logits).item() >>> predicted_label = model.config.id2label[predicted_class_ids] >>> predicted_label 'cash_deposit' ``` </pt> </frameworkcontent>
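파이프라인 출력처럼 상위 5개 클래스와 점수를 직접 확인하고 싶다면, 로짓에 소프트맥스를 적용한 뒤 상위 값을 골라볼 수 있습니다. 아래는 위에서 계산한 `logits`와 `model`을 그대로 사용하는 간단한 스케치이며, 실제 점수는 모델과 입력에 따라 달라집니다:

```py
>>> probs = torch.softmax(logits, dim=-1)[0]  # (num_labels,) 크기의 확률 분포
>>> top5 = torch.topk(probs, k=5)
>>> [(model.config.id2label[i.item()], round(s.item(), 4)) for s, i in zip(top5.values, top5.indices)]
```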
transformers/docs/source/ko/tasks/audio_classification.md/0
{ "file_path": "transformers/docs/source/ko/tasks/audio_classification.md", "repo_id": "transformers", "token_count": 7985 }
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 의미적 분할(Semantic segmentation)[[semantic-segmentation]] [[open-in-colab]] <Youtube id="dKE8SIt9C-w"/> 의미적 분할(semantic segmentation)은 이미지의 각 픽셀에 레이블 또는 클래스를 할당합니다. 분할(segmentation)에는 여러 종류가 있으며, 의미적 분할의 경우 동일한 물체의 고유 인스턴스를 구분하지 않습니다. 두 물체 모두 동일한 레이블이 지정됩니다(예시로, "car-1" 과 "car-2" 대신 "car"로 지정합니다). 실생활에서 흔히 볼 수 있는 의미적 분할의 적용 사례로는 보행자와 중요한 교통 정보를 식별하는 자율 주행 자동차 학습, 의료 이미지의 세포와 이상 징후 식별, 그리고 위성 이미지의 환경 변화 모니터링등이 있습니다. 이번 가이드에서 배울 내용은 다음과 같습니다: 1. [SceneParse150](https://huggingface.co/datasets/scene_parse_150) 데이터 세트를 이용해 [SegFormer](https://huggingface.co/docs/transformers/main/en/model_doc/segformer#segformer) 미세 조정하기. 2. 미세 조정된 모델을 추론에 사용하기. <Tip> 이 작업과 호환되는 모든 아키텍처와 체크포인트를 보려면 [작업 페이지](https://huggingface.co/tasks/image-segmentation)를 확인하는 것이 좋습니다. </Tip> 시작하기 전에 필요한 모든 라이브러리가 설치되었는지 확인하세요: ```bash pip install -q datasets transformers evaluate ``` 커뮤니티에 모델을 업로드하고 공유할 수 있도록 Hugging Face 계정에 로그인하는 것을 권장합니다. 프롬프트가 나타나면 토큰을 입력하여 로그인하세요: ```py >>> from huggingface_hub import notebook_login >>> notebook_login() ``` ## SceneParse150 데이터 세트 불러오기[[load-sceneparse150-dataset]] 🤗 Datasets 라이브러리에서 SceneParse150 데이터 세트의 더 작은 부분 집합을 가져오는 것으로 시작합니다. 이렇게 하면 데이터 세트 전체에 대한 훈련에 많은 시간을 할애하기 전에 실험을 통해 모든 것이 제대로 작동하는지 확인할 수 있습니다. ```py >>> from datasets import load_dataset >>> ds = load_dataset("scene_parse_150", split="train[:50]") ``` 데이터 세트의 `train`을 [`~datasets.Dataset.train_test_split`] 메소드를 사용하여 훈련 및 테스트 세트로 분할하세요: ```py >>> ds = ds.train_test_split(test_size=0.2) >>> train_ds = ds["train"] >>> test_ds = ds["test"] ``` 그리고 예시를 살펴보세요: ```py >>> train_ds[0] {'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=512x683 at 0x7F9B0C201F90>, 'annotation': <PIL.PngImagePlugin.PngImageFile image mode=L size=512x683 at 0x7F9B0C201DD0>, 'scene_category': 368} ``` - `image`: 장면의 PIL 이미지입니다. - `annotation`: 분할 지도(segmentation map)의 PIL 이미지입니다. 모델의 타겟이기도 합니다. - `scene_category`: "주방" 또는 "사무실"과 같이 이미지 장면을 설명하는 카테고리 ID입니다. 이 가이드에서는 둘 다 PIL 이미지인 `image`와 `annotation`만을 사용합니다. 나중에 모델을 설정할 때 유용하게 사용할 수 있도록 레이블 ID를 레이블 클래스에 매핑하는 사전도 만들고 싶을 것입니다. Hub에서 매핑을 다운로드하고 `id2label` 및 `label2id` 사전을 만드세요: ```py >>> import json >>> from pathlib import Path >>> from huggingface_hub import hf_hub_download >>> repo_id = "huggingface/label-files" >>> filename = "ade20k-id2label.json" >>> id2label = json.loads(Path(hf_hub_download(repo_id, filename, repo_type="dataset")).read_text()) >>> id2label = {int(k): v for k, v in id2label.items()} >>> label2id = {v: k for k, v in id2label.items()} >>> num_labels = len(id2label) ``` ## 전처리하기[[preprocess] 다음 단계는 모델에 사용할 이미지와 주석을 준비하기 위해 SegFormer 이미지 프로세서를 불러오는 것입니다. 우리가 사용하는 데이터 세트와 같은 일부 데이터 세트는 배경 클래스로 제로 인덱스를 사용합니다. 하지만 배경 클래스는 150개의 클래스에 실제로는 포함되지 않기 때문에 `do_reduce_labels=True` 를 설정해 모든 레이블에서 배경 클래스를 제거해야 합니다. 
제로 인덱스는 `255`로 대체되므로 SegFormer의 손실 함수에서 무시됩니다: ```py >>> from transformers import AutoImageProcessor >>> checkpoint = "nvidia/mit-b0" >>> image_processor = AutoImageProcessor.from_pretrained(checkpoint, do_reduce_labels=True) ``` <frameworkcontent> <pt> 이미지 데이터 세트에 데이터 증강을 적용하여 과적합에 대해 모델을 보다 강건하게 만드는 것이 일반적입니다. 이 가이드에서는 [torchvision](https://pytorch.org/vision/stable/index.html)의 [`ColorJitter`](https://pytorch.org/vision/stable/generated/torchvision.transforms.ColorJitter.html)를 사용하여 이미지의 색상 속성을 임의로 변경합니다. 하지만, 자신이 원하는 이미지 라이브러리를 사용할 수도 있습니다. ```py >>> from torchvision.transforms import ColorJitter >>> jitter = ColorJitter(brightness=0.25, contrast=0.25, saturation=0.25, hue=0.1) ``` 이제 모델에 사용할 이미지와 주석을 준비하기 위해 두 개의 전처리 함수를 만듭니다. 이 함수들은 이미지를 `pixel_values`로, 주석을 `labels`로 변환합니다. 훈련 세트의 경우 이미지 프로세서에 이미지를 제공하기 전에 `jitter`를 적용합니다. 테스트 세트의 경우 이미지 프로세서는 `images`를 자르고 정규화하며, 테스트 중에는 데이터 증강이 적용되지 않으므로 `labels`만 자릅니다. ```py >>> def train_transforms(example_batch): ... images = [jitter(x) for x in example_batch["image"]] ... labels = [x for x in example_batch["annotation"]] ... inputs = image_processor(images, labels) ... return inputs >>> def val_transforms(example_batch): ... images = [x for x in example_batch["image"]] ... labels = [x for x in example_batch["annotation"]] ... inputs = image_processor(images, labels) ... return inputs ``` 모든 데이터 세트에 `jitter`를 적용하려면, 🤗 Datasets [`~datasets.Dataset.set_transform`] 함수를 사용하세요. 즉시 변환이 적용되기 때문에 더 빠르고 디스크 공간을 덜 차지합니다: ```py >>> train_ds.set_transform(train_transforms) >>> test_ds.set_transform(val_transforms) ``` </pt> </frameworkcontent> <frameworkcontent> <tf> 이미지 데이터 세트에 데이터 증강을 적용하여 과적합에 대해 모델을 보다 강건하게 만드는 것이 일반적입니다. 이 가이드에서는 [`tf.image`](https://www.tensorflow.org/api_docs/python/tf/image)를 사용하여 이미지의 색상 속성을 임의로 변경합니다. 하지만, 자신이 원하는 이미지 라이브러리를 사용할 수도 있습니다. 별개의 두 변환 함수를 정의합니다: - 이미지 증강을 포함하는 학습 데이터 변환 - 🤗 Transformers의 컴퓨터 비전 모델은 채널 우선 레이아웃을 기대하기 때문에, 이미지만 바꾸는 검증 데이터 변환 ```py >>> import tensorflow as tf >>> def aug_transforms(image): ... image = tf.keras.utils.img_to_array(image) ... image = tf.image.random_brightness(image, 0.25) ... image = tf.image.random_contrast(image, 0.5, 2.0) ... image = tf.image.random_saturation(image, 0.75, 1.25) ... image = tf.image.random_hue(image, 0.1) ... image = tf.transpose(image, (2, 0, 1)) ... return image >>> def transforms(image): ... image = tf.keras.utils.img_to_array(image) ... image = tf.transpose(image, (2, 0, 1)) ... return image ``` 그런 다음 모델을 위해 두 개의 전처리 함수를 만들어 이미지 및 주석 배치를 준비합니다. 이 함수들은 이미지 변환을 적용하고 이전에 로드한 `image_processor`를 사용하여 이미지를 `pixel_values`로, 주석을 `label`로 변환합니다. `ImageProcessor` 는 이미지의 크기 조정과 정규화도 처리합니다. ```py >>> def train_transforms(example_batch): ... images = [aug_transforms(x.convert("RGB")) for x in example_batch["image"]] ... labels = [x for x in example_batch["annotation"]] ... inputs = image_processor(images, labels) ... return inputs >>> def val_transforms(example_batch): ... images = [transforms(x.convert("RGB")) for x in example_batch["image"]] ... labels = [x for x in example_batch["annotation"]] ... inputs = image_processor(images, labels) ... return inputs ``` 전체 데이터 집합에 전처리 변환을 적용하려면 🤗 Datasets [`~datasets.Dataset.set_transform`] 함수를 사용하세요. 즉시 변환이 적용되기 때문에 더 빠르고 디스크 공간을 덜 차지합니다: ```py >>> train_ds.set_transform(train_transforms) >>> test_ds.set_transform(val_transforms) ``` </tf> </frameworkcontent> ## 평가하기[[evaluate]] 훈련 중에 메트릭을 포함하면 모델의 성능을 평가하는 데 도움이 되는 경우가 많습니다. 🤗 [Evaluate](https://huggingface.co/docs/evaluate/index) 라이브러리를 사용하여 평가 방법을 빠르게 로드할 수 있습니다. 
이 태스크에서는 [mean Intersection over Union](https://huggingface.co/spaces/evaluate-metric/accuracy) (IoU) 메트릭을 로드하세요 (메트릭을 로드하고 계산하는 방법에 대해 자세히 알아보려면 🤗 Evaluate [quick tour](https://huggingface.co/docs/evaluate/a_quick_tour)를 살펴보세요). ```py >>> import evaluate >>> metric = evaluate.load("mean_iou") ``` 그런 다음 메트릭을 [`~evaluate.EvaluationModule.compute`]하는 함수를 만듭니다. 예측을 먼저 로짓으로 변환한 다음, 레이블의 크기에 맞게 모양을 다시 지정해야 [`~evaluate.EvaluationModule.compute`]를 호출할 수 있습니다: <frameworkcontent> <pt> ```py >>> import numpy as np >>> import torch >>> from torch import nn >>> def compute_metrics(eval_pred): ... with torch.no_grad(): ... logits, labels = eval_pred ... logits_tensor = torch.from_numpy(logits) ... logits_tensor = nn.functional.interpolate( ... logits_tensor, ... size=labels.shape[-2:], ... mode="bilinear", ... align_corners=False, ... ).argmax(dim=1) ... pred_labels = logits_tensor.detach().cpu().numpy() ... metrics = metric.compute( ... predictions=pred_labels, ... references=labels, ... num_labels=num_labels, ... ignore_index=255, ... reduce_labels=False, ... ) ... for key, value in metrics.items(): ... if isinstance(value, np.ndarray): ... metrics[key] = value.tolist() ... return metrics ``` </pt> </frameworkcontent> <frameworkcontent> <tf> ```py >>> def compute_metrics(eval_pred): ... logits, labels = eval_pred ... logits = tf.transpose(logits, perm=[0, 2, 3, 1]) ... logits_resized = tf.image.resize( ... logits, ... size=tf.shape(labels)[1:], ... method="bilinear", ... ) ... pred_labels = tf.argmax(logits_resized, axis=-1) ... metrics = metric.compute( ... predictions=pred_labels, ... references=labels, ... num_labels=num_labels, ... ignore_index=-1, ... reduce_labels=image_processor.do_reduce_labels, ... ) ... per_category_accuracy = metrics.pop("per_category_accuracy").tolist() ... per_category_iou = metrics.pop("per_category_iou").tolist() ... metrics.update({f"accuracy_{id2label[i]}": v for i, v in enumerate(per_category_accuracy)}) ... metrics.update({f"iou_{id2label[i]}": v for i, v in enumerate(per_category_iou)}) ... return {"val_" + k: v for k, v in metrics.items()} ``` </tf> </frameworkcontent> 이제 `compute_metrics` 함수를 사용할 준비가 되었습니다. 트레이닝을 설정할 때 이 함수로 돌아가게 됩니다. ## 학습하기[[train]] <frameworkcontent> <pt> <Tip> 만약 [`Trainer`]를 사용해 모델을 미세 조정하는 것에 익숙하지 않다면, [여기](../training#finetune-with-trainer)에서 기본 튜토리얼을 살펴보세요! </Tip> 이제 모델 학습을 시작할 준비가 되었습니다! [`AutoModelForSemanticSegmentation`]로 SegFormer를 불러오고, 모델에 레이블 ID와 레이블 클래스 간의 매핑을 전달합니다: ```py >>> from transformers import AutoModelForSemanticSegmentation, TrainingArguments, Trainer >>> model = AutoModelForSemanticSegmentation.from_pretrained(checkpoint, id2label=id2label, label2id=label2id) ``` 이제 세 단계만 남았습니다: 1. 학습 하이퍼파라미터를 [`TrainingArguments`]에 정의합니다. `image` 열이 삭제되기 때문에 사용하지 않는 열을 제거하지 않는 것이 중요합니다. `image` 열이 없으면 `pixel_values`을 생성할 수 없습니다. 이런 경우를 방지하려면 `remove_unused_columns=False`로 설정하세요! 유일하게 필요한 다른 매개변수는 모델을 저장할 위치를 지정하는 `output_dir`입니다. `push_to_hub=True`를 설정하여 이 모델을 Hub에 푸시합니다(모델을 업로드하려면 Hugging Face에 로그인해야 합니다). 각 에포크가 끝날 때마다 [`Trainer`]가 IoU 메트릭을 평가하고 학습 체크포인트를 저장합니다. 2. 모델, 데이터 세트, 토크나이저, 데이터 콜레이터, `compute_metrics` 함수와 함께 학습 인자를 [`Trainer`]에 전달하세요. 3. 모델을 미세 조정하기 위해 [`~Trainer.train`]를 호출하세요. ```py >>> training_args = TrainingArguments( ... output_dir="segformer-b0-scene-parse-150", ... learning_rate=6e-5, ... num_train_epochs=50, ... per_device_train_batch_size=2, ... per_device_eval_batch_size=2, ... save_total_limit=3, ... eval_strategy="steps", ... save_strategy="steps", ... save_steps=20, ... eval_steps=20, ... 
logging_steps=1, ... eval_accumulation_steps=5, ... remove_unused_columns=False, ... push_to_hub=True, ... ) >>> trainer = Trainer( ... model=model, ... args=training_args, ... train_dataset=train_ds, ... eval_dataset=test_ds, ... compute_metrics=compute_metrics, ... ) >>> trainer.train() ``` 학습이 완료되면, 누구나 모델을 사용할 수 있도록 [`~transformers.Trainer.push_to_hub`] 메서드를 사용해 Hub에 모델을 공유하세요: ```py >>> trainer.push_to_hub() ``` </pt> </frameworkcontent> <frameworkcontent> <tf> <Tip> Keras로 모델을 미세 조정하는 데 익숙하지 않은 경우, 먼저 [기본 튜토리얼](../training#train-a-tensorflow-model-with-keras)을 확인해보세요! </Tip> TensorFlow에서 모델을 미세 조정하려면 다음 단계를 따르세요: 1. 학습 하이퍼파라미터를 정의하고 옵티마이저와 학습률 스케쥴러를 설정하세요. 2. 사전 학습된 모델을 인스턴스화하세요. 3. 🤗 Dataset을 `tf.data.Dataset`로 변환하세요. 4. 모델을 컴파일하세요. 5. 콜백을 추가하여 메트릭을 계산하고 🤗 Hub에 모델을 업로드하세요. 6. `fit()` 메서드를 사용하여 훈련을 실행하세요. 하이퍼파라미터, 옵티마이저, 학습률 스케쥴러를 정의하는 것으로 시작하세요: ```py >>> from transformers import create_optimizer >>> batch_size = 2 >>> num_epochs = 50 >>> num_train_steps = len(train_ds) * num_epochs >>> learning_rate = 6e-5 >>> weight_decay_rate = 0.01 >>> optimizer, lr_schedule = create_optimizer( ... init_lr=learning_rate, ... num_train_steps=num_train_steps, ... weight_decay_rate=weight_decay_rate, ... num_warmup_steps=0, ... ) ``` 그런 다음 레이블 매핑과 함께 [`TFAutoModelForSemanticSegmentation`]을 사용하여 SegFormer를 불러오고 옵티마이저로 컴파일합니다. 트랜스포머 모델은 모두 디폴트로 태스크 관련 손실 함수가 있으므로 원치 않으면 지정할 필요가 없습니다: ```py >>> from transformers import TFAutoModelForSemanticSegmentation >>> model = TFAutoModelForSemanticSegmentation.from_pretrained( ... checkpoint, ... id2label=id2label, ... label2id=label2id, ... ) >>> model.compile(optimizer=optimizer) # 손실 함수 인자가 없습니다! ``` [`~datasets.Dataset.to_tf_dataset`] 와 [`DefaultDataCollator`]를 사용해 데이터 세트를 `tf.data.Dataset` 포맷으로 변환하세요: ```py >>> from transformers import DefaultDataCollator >>> data_collator = DefaultDataCollator(return_tensors="tf") >>> tf_train_dataset = train_ds.to_tf_dataset( ... columns=["pixel_values", "label"], ... shuffle=True, ... batch_size=batch_size, ... collate_fn=data_collator, ... ) >>> tf_eval_dataset = test_ds.to_tf_dataset( ... columns=["pixel_values", "label"], ... shuffle=True, ... batch_size=batch_size, ... collate_fn=data_collator, ... ) ``` 예측으로 정확도를 계산하고 모델을 🤗 Hub로 푸시하려면 [Keras callbacks](../main_classes/keras_callbacks)를 사용하세요. `compute_metrics` 함수를 [`KerasMetricCallback`]에 전달하고, 모델 업로드를 위해 [`PushToHubCallback`]를 사용하세요: ```py >>> from transformers.keras_callbacks import KerasMetricCallback, PushToHubCallback >>> metric_callback = KerasMetricCallback( ... metric_fn=compute_metrics, eval_dataset=tf_eval_dataset, batch_size=batch_size, label_cols=["labels"] ... ) >>> push_to_hub_callback = PushToHubCallback(output_dir="scene_segmentation", tokenizer=image_processor) >>> callbacks = [metric_callback, push_to_hub_callback] ``` 이제 모델을 훈련할 준비가 되었습니다! 훈련 및 검증 데이터 세트, 에포크 수와 함께 `fit()`을 호출하고, 콜백을 사용하여 모델을 미세 조정합니다: ```py >>> model.fit( ... tf_train_dataset, ... validation_data=tf_eval_dataset, ... callbacks=callbacks, ... epochs=num_epochs, ... ) ``` 축하합니다! 모델을 미세 조정하고 🤗 Hub에 공유했습니다. 이제 추론에 사용할 수 있습니다! </tf> </frameworkcontent> ## 추론하기[[inference]] 이제 모델을 미세 조정했으니 추론에 사용할 수 있습니다! 추론할 이미지를 로드하세요: ```py >>> image = ds[0]["image"] >>> image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/semantic-seg-image.png" alt="Image of bedroom"/> </div> <frameworkcontent> <pt> 추론을 위해 미세 조정한 모델을 시험해 보는 가장 간단한 방법은 [`pipeline`]에서 사용하는 것입니다. 
모델을 사용하여 이미지 분할을 위한 `pipeline`을 인스턴스화하고 이미지를 전달합니다: ```py >>> from transformers import pipeline >>> segmenter = pipeline("image-segmentation", model="my_awesome_seg_model") >>> segmenter(image) [{'score': None, 'label': 'wall', 'mask': <PIL.Image.Image image mode=L size=640x427 at 0x7FD5B2062690>}, {'score': None, 'label': 'sky', 'mask': <PIL.Image.Image image mode=L size=640x427 at 0x7FD5B2062A50>}, {'score': None, 'label': 'floor', 'mask': <PIL.Image.Image image mode=L size=640x427 at 0x7FD5B2062B50>}, {'score': None, 'label': 'ceiling', 'mask': <PIL.Image.Image image mode=L size=640x427 at 0x7FD5B2062A10>}, {'score': None, 'label': 'bed ', 'mask': <PIL.Image.Image image mode=L size=640x427 at 0x7FD5B2062E90>}, {'score': None, 'label': 'windowpane', 'mask': <PIL.Image.Image image mode=L size=640x427 at 0x7FD5B2062390>}, {'score': None, 'label': 'cabinet', 'mask': <PIL.Image.Image image mode=L size=640x427 at 0x7FD5B2062550>}, {'score': None, 'label': 'chair', 'mask': <PIL.Image.Image image mode=L size=640x427 at 0x7FD5B2062D90>}, {'score': None, 'label': 'armchair', 'mask': <PIL.Image.Image image mode=L size=640x427 at 0x7FD5B2062E10>}] ``` 원하는 경우 `pipeline`의 결과를 수동으로 복제할 수도 있습니다. 이미지 프로세서로 이미지를 처리하고 `pixel_values`을 GPU에 배치합니다: ```py >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # 가능하다면 GPU를 사용하고, 그렇지 않다면 CPU를 사용하세요 >>> encoding = image_processor(image, return_tensors="pt") >>> pixel_values = encoding.pixel_values.to(device) ``` 모델에 입력을 전달하고 `logits`를 반환합니다: ```py >>> outputs = model(pixel_values=pixel_values) >>> logits = outputs.logits.cpu() ``` 그런 다음 로짓의 크기를 원본 이미지 크기로 다시 조정합니다: ```py >>> upsampled_logits = nn.functional.interpolate( ... logits, ... size=image.size[::-1], ... mode="bilinear", ... align_corners=False, ... ) >>> pred_seg = upsampled_logits.argmax(dim=1)[0] ``` </pt> </frameworkcontent> <frameworkcontent> <tf> 이미지 프로세서를 로드하여 이미지를 전처리하고 입력을 TensorFlow 텐서로 반환합니다: ```py >>> from transformers import AutoImageProcessor >>> image_processor = AutoImageProcessor.from_pretrained("MariaK/scene_segmentation") >>> inputs = image_processor(image, return_tensors="tf") ``` 모델에 입력을 전달하고 `logits`를 반환합니다: ```py >>> from transformers import TFAutoModelForSemanticSegmentation >>> model = TFAutoModelForSemanticSegmentation.from_pretrained("MariaK/scene_segmentation") >>> logits = model(**inputs).logits ``` 그런 다음 로그를 원본 이미지 크기로 재조정하고 클래스 차원에 argmax를 적용합니다: ```py >>> logits = tf.transpose(logits, [0, 2, 3, 1]) >>> upsampled_logits = tf.image.resize( ... logits, ... # `image.size`가 너비와 높이를 반환하기 때문에 `image`의 모양을 반전시킵니다 ... image.size[::-1], ... ) >>> pred_seg = tf.math.argmax(upsampled_logits, axis=-1)[0] ``` </tf> </frameworkcontent> 결과를 시각화하려면 [dataset color palette](https://github.com/tensorflow/models/blob/3f1ca33afe3c1631b733ea7e40c294273b9e406d/research/deeplab/utils/get_dataset_colormap.py#L51)를 각 클래스를 RGB 값에 매핑하는 `ade_palette()`로 로드합니다. 그런 다음 이미지와 예측된 분할 지도(segmentation map)을 결합하여 구성할 수 있습니다: ```py >>> import matplotlib.pyplot as plt >>> import numpy as np >>> color_seg = np.zeros((pred_seg.shape[0], pred_seg.shape[1], 3), dtype=np.uint8) >>> palette = np.array(ade_palette()) >>> for label, color in enumerate(palette): ... 
    color_seg[pred_seg == label, :] = color
>>> color_seg = color_seg[..., ::-1]  # BGR로 변환

>>> img = np.array(image) * 0.5 + color_seg * 0.5  # 분할 지도로 이미지 구성
>>> img = img.astype(np.uint8)

>>> plt.figure(figsize=(15, 10))
>>> plt.imshow(img)
>>> plt.show()
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/semantic-seg-preds.png" alt="Image of bedroom overlaid with segmentation map"/>
</div>
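위 코드는 링크된 저장소에서 가져온 `ade_palette()` 함수가 이미 정의되어 있다고 가정합니다. 이 함수는 150개 클래스 각각에 대응하는 RGB 값 목록을 반환하기만 하면 되므로, 빠른 실험에서는 아래와 같이 임의의 색으로 만든 대체 팔레트를 사용할 수도 있습니다. 색상 값 자체는 시각화를 위한 가정일 뿐입니다:

```py
>>> import numpy as np

>>> def ade_palette():
...     """각 클래스 인덱스를 RGB 색상에 매핑하는 (150, 3) 크기의 팔레트를 반환합니다."""
...     rng = np.random.default_rng(seed=0)  # 재현 가능하도록 시드를 고정합니다
...     return rng.integers(0, 256, size=(150, 3), dtype=np.uint8).tolist()
```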
transformers/docs/source/ko/tasks/semantic_segmentation.md/0
{ "file_path": "transformers/docs/source/ko/tasks/semantic_segmentation.md", "repo_id": "transformers", "token_count": 14041 }
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Tour rápido [[open-in-colab]] Comece a trabalhar com 🤗 Transformers! Comece usando [`pipeline`] para rápida inferência e facilmente carregue um modelo pré-treinado e um tokenizer com [AutoClass](./model_doc/auto) para resolver tarefas de texto, visão ou áudio. <Tip> Todos os exemplos de código apresentados na documentação têm um botão no canto superior direito para escolher se você deseja ocultar ou mostrar o código no Pytorch ou no TensorFlow. Caso contrário, é esperado que funcione para ambos back-ends sem nenhuma alteração. </Tip> ## Pipeline [`pipeline`] é a maneira mais fácil de usar um modelo pré-treinado para uma dada tarefa. <Youtube id="tiZFewofSLM"/> A [`pipeline`] apoia diversas tarefas fora da caixa: **Texto**: * Análise sentimental: classifica a polaridade de um texto. * Geração de texto (em Inglês): gera texto a partir de uma entrada. * Reconhecimento de entidade mencionada: legenda cada palavra com uma classe que a representa (pessoa, data, local, etc...) * Respostas: extrai uma resposta dado algum contexto e uma questão * Máscara de preenchimento: preenche o espaço, dado um texto com máscaras de palavras. * Sumarização: gera o resumo de um texto longo ou documento. * Tradução: traduz texto para outra língua. * Extração de características: cria um tensor que representa o texto. **Imagem**: * Classificação de imagens: classifica uma imagem. * Segmentação de imagem: classifica cada pixel da imagem. * Detecção de objetos: detecta objetos em uma imagem. **Audio**: * Classficação de áudio: legenda um trecho de áudio fornecido. * Reconhecimento de fala automático: transcreve audio em texto. <Tip> Para mais detalhes sobre a [`pipeline`] e tarefas associadas, siga a documentação [aqui](./main_classes/pipelines). </Tip> ### Uso da pipeline No exemplo a seguir, você usará [`pipeline`] para análise sentimental. Instale as seguintes dependências se você ainda não o fez: <frameworkcontent> <pt> ```bash pip install torch ``` </pt> <tf> ```bash pip install tensorflow ``` </tf> </frameworkcontent> Importe [`pipeline`] e especifique a tarefa que deseja completar: ```py >>> from transformers import pipeline >>> classifier = pipeline("sentiment-analysis") ``` A pipeline baixa and armazena um [modelo pré-treinado](https://huggingface.co/distilbert/distilbert-base-uncased-finetuned-sst-2-english) padrão e tokenizer para análise sentimental. 
Agora você pode usar `classifier` no texto alvo: ```py >>> classifier("We are very happy to show you the 🤗 Transformers library.") [{'label': 'POSITIVE', 'score': 0.9998}] ``` Para mais de uma sentença, passe uma lista para a [`pipeline`], a qual retornará uma lista de dicionários: ```py >>> results = classifier(["We are very happy to show you the 🤗 Transformers library.", "We hope you don't hate it."]) >>> for result in results: ... print(f"label: {result['label']}, with score: {round(result['score'], 4)}") label: POSITIVE, with score: 0.9998 label: NEGATIVE, with score: 0.5309 ``` A [`pipeline`] também pode iterar sobre um Dataset inteiro. Comece instalando a biblioteca de [🤗 Datasets](https://huggingface.co/docs/datasets/): ```bash pip install datasets ``` Crie uma [`pipeline`] com a tarefa que deseja resolver e o modelo que deseja usar. ```py >>> import torch >>> from transformers import pipeline >>> speech_recognizer = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h") ``` A seguir, carregue uma base de dados (confira a 🤗 [Iniciação em Datasets](https://huggingface.co/docs/datasets/quickstart) para mais detalhes) que você gostaria de iterar sobre. Por exemplo, vamos carregar o dataset [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14): ```py >>> from datasets import load_dataset, Audio >>> dataset = load_dataset("PolyAI/minds14", name="en-US", split="train") # doctest: +IGNORE_RESULT ``` Precisamos garantir que a taxa de amostragem do conjunto de dados corresponda à taxa de amostragem em que o facebook/wav2vec2-base-960h foi treinado. ```py >>> dataset = dataset.cast_column("audio", Audio(sampling_rate=speech_recognizer.feature_extractor.sampling_rate)) ``` Os arquivos de áudio são carregados e re-amostrados automaticamente ao chamar a coluna `"audio"`. Vamos extrair as arrays de formas de onda originais das primeiras 4 amostras e passá-las como uma lista para o pipeline: ```py >>> result = speech_recognizer(dataset[:4]["audio"]) >>> print([d["text"] for d in result]) ['I WOULD LIKE TO SET UP A JOINT ACCOUNT WITH MY PARTNER HOW DO I PROCEED WITH DOING THAT', "FONDERING HOW I'D SET UP A JOIN TO HET WITH MY WIFE AND WHERE THE AP MIGHT BE", "I I'D LIKE TOY SET UP A JOINT ACCOUNT WITH MY PARTNER I'M NOT SEEING THE OPTION TO DO IT ON THE APSO I CALLED IN TO GET SOME HELP CAN I JUST DO IT OVER THE PHONE WITH YOU AND GIVE YOU THE INFORMATION OR SHOULD I DO IT IN THE AP AND I'M MISSING SOMETHING UQUETTE HAD PREFERRED TO JUST DO IT OVER THE PHONE OF POSSIBLE THINGS", 'HOW DO I TURN A JOIN A COUNT'] ``` Para um conjunto de dados maior onde as entradas são maiores (como em fala ou visão), será necessário passar um gerador em vez de uma lista que carregue todas as entradas na memória. Consulte a [documentação do pipeline](./main_classes/pipelines) para mais informações. ### Use outro modelo e tokenizer na pipeline A [`pipeline`] pode acomodar qualquer modelo do [Model Hub](https://huggingface.co/models), facilitando sua adaptação para outros casos de uso. Por exemplo, se você quiser um modelo capaz de lidar com texto em francês, use as tags no Model Hub para filtrar um modelo apropriado. O principal resultado filtrado retorna um [modelo BERT](https://huggingface.co/nlptown/bert-base-multilingual-uncased-sentiment) bilíngue ajustado para análise de sentimentos. Ótimo, vamos usar este modelo! 
```py >>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment" ``` <frameworkcontent> <pt> Use o [`AutoModelForSequenceClassification`] e [`AutoTokenizer`] para carregar o modelo pré-treinado e seu tokenizer associado (mais em `AutoClass` abaixo): ```py >>> from transformers import AutoTokenizer, AutoModelForSequenceClassification >>> model = AutoModelForSequenceClassification.from_pretrained(model_name) >>> tokenizer = AutoTokenizer.from_pretrained(model_name) ``` </pt> <tf> Use o [`TFAutoModelForSequenceClassification`] and [`AutoTokenizer`] para carregar o modelo pré-treinado e o tokenizer associado (mais em `TFAutoClass` abaixo): ```py >>> from transformers import AutoTokenizer, TFAutoModelForSequenceClassification >>> model = TFAutoModelForSequenceClassification.from_pretrained(model_name) >>> tokenizer = AutoTokenizer.from_pretrained(model_name) ``` </tf> </frameworkcontent> Então você pode especificar o modelo e o tokenizador na [`pipeline`] e aplicar o `classifier` no seu texto alvo: ```py >>> classifier = pipeline("sentiment-analysis", model=model, tokenizer=tokenizer) >>> classifier("Nous sommes très heureux de vous présenter la bibliothèque 🤗 Transformers.") [{'label': '5 stars', 'score': 0.7273}] ``` Se você não conseguir achar um modelo para o seu caso de uso, precisará usar fine-tune em um modelo pré-treinado nos seus dados. Veja nosso [tutorial de fine-tuning](./training) para descobrir como. Finalmente, depois que você tiver usado esse processo em seu modelo, considere compartilhá-lo conosco (veja o tutorial [aqui](./model_sharing)) na plataforma Model Hub afim de democratizar NLP! 🤗 ## AutoClass <Youtube id="AhChOFRegn4"/> Por baixo dos panos, as classes [`AutoModelForSequenceClassification`] e [`AutoTokenizer`] trabalham juntas para fortificar o [`pipeline`]. Um [AutoClass](./model_doc/auto) é um atalho que automaticamente recupera a arquitetura de um modelo pré-treinado a partir de seu nome ou caminho. Basta selecionar a `AutoClass` apropriada para sua tarefa e seu tokenizer associado com [`AutoTokenizer`]. Vamos voltar ao nosso exemplo e ver como você pode usar a `AutoClass` para replicar os resultados do [`pipeline`]. ### AutoTokenizer Um tokenizer é responsável por pré-processar o texto em um formato que seja compreensível para o modelo. Primeiro, o tokenizer dividirá o texto em palavras chamadas *tokens*. Existem várias regras que regem o processo de tokenização, incluindo como dividir uma palavra e em que nível (saiba mais sobre tokenização [aqui](./tokenizer_summary)). A coisa mais importante a lembrar, porém, é que você precisa instanciar o tokenizer com o mesmo nome do modelo para garantir que está usando as mesmas regras de tokenização com as quais um modelo foi pré-treinado. Carregue um tokenizer com [`AutoTokenizer`]: ```py >>> from transformers import AutoTokenizer >>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment" >>> tokenizer = AutoTokenizer.from_pretrained(model_name) ``` Em seguida, o tokenizer converte os tokens em números para construir um tensor como entrada para o modelo. Isso é conhecido como o *vocabulário* do modelo. 
Passe o texto para o tokenizer: ```py >>> encoding = tokenizer("We are very happy to show you the 🤗 Transformers library.") >>> print(encoding) {'input_ids': [101, 11312, 10320, 12495, 19308, 10114, 11391, 10855, 10103, 100, 58263, 13299, 119, 102], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]} ``` O tokenizer retornará um dicionário contendo: * [input_ids](./glossary#input-ids): representações numéricas de seus tokens. * [atttention_mask](.glossary#attention-mask): indica quais tokens devem ser atendidos. Assim como o [`pipeline`], o tokenizer aceitará uma lista de entradas. Além disso, o tokenizer também pode preencher e truncar o texto para retornar um lote com comprimento uniforme: <frameworkcontent> <pt> ```py >>> pt_batch = tokenizer( ... ["We are very happy to show you the 🤗 transformers library.", "We hope you don't hate it."], ... padding=True, ... truncation=True, ... max_length=512, ... return_tensors="pt", ... ) ``` </pt> <tf> ```py >>> tf_batch = tokenizer( ... ["We are very happy to show you the 🤗 Transformers library.", "We hope you don't hate it."], ... padding=True, ... truncation=True, ... max_length=512, ... return_tensors="tf", ... ) ``` </tf> </frameworkcontent> Leia o tutorial de [pré-processamento](./pré-processamento) para obter mais detalhes sobre tokenização. ### AutoModel <frameworkcontent> <pt> 🤗 Transformers fornecem uma maneira simples e unificada de carregar instâncias pré-treinadas. Isso significa que você pode carregar um [`AutoModel`] como carregaria um [`AutoTokenizer`]. A única diferença é selecionar o [`AutoModel`] correto para a tarefa. Como você está fazendo classificação de texto ou sequência, carregue [`AutoModelForSequenceClassification`]: ```py >>> from transformers import AutoModelForSequenceClassification >>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment" >>> pt_model = AutoModelForSequenceClassification.from_pretrained(model_name) ``` <Tip> Veja o [sumário de tarefas](./task_summary) para qual classe de [`AutoModel`] usar para cada tarefa. </Tip> Agora você pode passar seu grupo de entradas pré-processadas diretamente para o modelo. Você apenas tem que descompactar o dicionário usando `**`: ```py >>> pt_outputs = pt_model(**pt_batch) ``` O modelo gera as ativações finais no atributo `logits`. Aplique a função softmax aos `logits` para recuperar as probabilidades: ```py >>> from torch import nn >>> pt_predictions = nn.functional.softmax(pt_outputs.logits, dim=-1) >>> print(pt_predictions) tensor([[0.0021, 0.0018, 0.0115, 0.2121, 0.7725], [0.2084, 0.1826, 0.1969, 0.1755, 0.2365]], grad_fn=<SoftmaxBackward0>) ``` </pt> <tf> 🤗 Transformers fornecem uma maneira simples e unificada de carregar instâncias pré-treinadas. Isso significa que você pode carregar um [`TFAutoModel`] como carregaria um [`AutoTokenizer`]. A única diferença é selecionar o [`TFAutoModel`] correto para a tarefa. Como você está fazendo classificação de texto ou sequência, carregue [`TFAutoModelForSequenceClassification`]: ```py >>> from transformers import TFAutoModelForSequenceClassification >>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment" >>> tf_model = TFAutoModelForSequenceClassification.from_pretrained(model_name) ``` <Tip> Veja o [sumário de tarefas](./task_summary) para qual classe de [`AutoModel`] usar para cada tarefa. 
</Tip> Agora você pode passar seu grupo de entradas pré-processadas diretamente para o modelo através da passagem de chaves de dicionários ao tensor. ```py >>> tf_outputs = tf_model(tf_batch) ``` O modelo gera as ativações finais no atributo `logits`. Aplique a função softmax aos `logits` para recuperar as probabilidades: ```py >>> import tensorflow as tf >>> tf_predictions = tf.nn.softmax(tf_outputs.logits, axis=-1) >>> tf_predictions # doctest: +IGNORE_RESULT ``` </tf> </frameworkcontent> <Tip> Todos os modelos de 🤗 Transformers (PyTorch ou TensorFlow) geram tensores *antes* da função de ativação final (como softmax) pois essa função algumas vezes é fundida com a perda. </Tip> Os modelos são um standard [`torch.nn.Module`](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) ou um [`tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model) para que você possa usá-los em seu loop de treinamento habitual. No entanto, para facilitar as coisas, 🤗 Transformers fornece uma classe [`Trainer`] para PyTorch que adiciona funcionalidade para treinamento distribuído, precisão mista e muito mais. Para o TensorFlow, você pode usar o método `fit` de [Keras](https://keras.io/). Consulte o [tutorial de treinamento](./training) para obter mais detalhes. <Tip> As saídas do modelo 🤗 Transformers são classes de dados especiais para que seus atributos sejam preenchidos automaticamente em um IDE. As saídas do modelo também se comportam como uma tupla ou um dicionário (por exemplo, você pode indexar com um inteiro, uma parte ou uma string), caso em que os atributos `None` são ignorados. </Tip> ### Salvar um modelo <frameworkcontent> <pt> Uma vez que seu modelo estiver afinado, você pode salvá-lo com seu Tokenizer usando [`PreTrainedModel.save_pretrained`]: ```py >>> pt_save_directory = "./pt_save_pretrained" >>> tokenizer.save_pretrained(pt_save_directory) # doctest: +IGNORE_RESULT >>> pt_model.save_pretrained(pt_save_directory) ``` Quando você estiver pronto para usá-lo novamente, recarregue com [`PreTrainedModel.from_pretrained`]: ```py >>> pt_model = AutoModelForSequenceClassification.from_pretrained("./pt_save_pretrained") ``` </pt> <tf> Uma vez que seu modelo estiver afinado, você pode salvá-lo com seu Tokenizer usando [`TFPreTrainedModel.save_pretrained`]: ```py >>> tf_save_directory = "./tf_save_pretrained" >>> tokenizer.save_pretrained(tf_save_directory) # doctest: +IGNORE_RESULT >>> tf_model.save_pretrained(tf_save_directory) ``` Quando você estiver pronto para usá-lo novamente, recarregue com [`TFPreTrainedModel.from_pretrained`] ```py >>> tf_model = TFAutoModelForSequenceClassification.from_pretrained("./tf_save_pretrained") ``` </tf> </frameworkcontent> Um recurso particularmente interessante dos 🤗 Transformers é a capacidade de salvar um modelo e recarregá-lo como um modelo PyTorch ou TensorFlow. Use `from_pt` ou `from_tf` para converter o modelo de um framework para outro: <frameworkcontent> <pt> ```py >>> from transformers import AutoModel >>> tokenizer = AutoTokenizer.from_pretrained(pt_save_directory) >>> pt_model = AutoModelForSequenceClassification.from_pretrained(pt_save_directory, from_pt=True) ``` </pt> <tf> ```py >>> from transformers import TFAutoModel >>> tokenizer = AutoTokenizer.from_pretrained(tf_save_directory) >>> tf_model = TFAutoModelForSequenceClassification.from_pretrained(tf_save_directory, from_tf=True) ``` </tf> </frameworkcontent>
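Para ilustrar a dica anterior sobre as saídas do modelo, segue um exemplo mínimo (reutilizando o `pt_outputs` calculado acima) mostrando que o mesmo tensor de logits pode ser acessado como atributo, como chave de dicionário ou por índice; os atributos `None` (como a perda, quando não há rótulos) são ignorados na indexação por inteiro:

```py
>>> logits_por_atributo = pt_outputs.logits   # acesso por atributo
>>> logits_por_chave = pt_outputs["logits"]   # acesso por chave, como em um dicionário
>>> logits_por_indice = pt_outputs[0]         # acesso por índice, como em uma tupla
>>> torch.equal(logits_por_atributo, logits_por_indice)
True
```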
transformers/docs/source/pt/quicktour.md/0
{ "file_path": "transformers/docs/source/pt/quicktour.md", "repo_id": "transformers", "token_count": 6104 }
<!--版权2023年HuggingFace团队保留所有权利。 根据Apache许可证第2.0版(“许可证”)许可;除非符合许可证,否则您不得使用此文件。您可以在以下网址获取许可证的副本: http://www.apache.org/licenses/LICENSE-2.0 除非适用法律要求或书面同意,否则按“按原样”分发的软件,无论是明示还是暗示的,都没有任何担保或条件。请参阅许可证以了解特定语言下的权限和限制。 ⚠️ 请注意,本文件虽然使用Markdown编写,但包含了特定的语法,适用于我们的doc-builder(类似于MDX),可能无法在您的Markdown查看器中正常渲染。 --> # 注意力机制 大多数 transformer 模型使用完全注意力机制,该机制采用正方形的注意力矩阵。当输入很长的文本时,这将导致巨大的计算瓶颈。Longformer 和 Reformer 是提高注意力机制效率的改进模型,它们使用稀疏化的注意力矩阵来加速训练。 ## 局部敏感哈希注意力机制(LSH attention) [Reformer](model_doc/reformer)使用LSH(局部敏感哈希)的注意力机制。在计算softmax(QK^t)时,只有矩阵QK^t中的最大元素(在softmax维度上)会做出有用的贡献。所以对于Q中的每个查询q,我们只需要考虑K中与q接近的键k,这里使用了一个哈希函数来确定q和k是否接近。注意力掩码被修改以掩盖当前的词符(token)(除了第一个位置之外),因为这样会使得查询和键相等(因此非常相似)。由于哈希可能会有些随机性,所以在实践中使用多个哈希函数(由n_rounds参数确定),然后一起求平均。 ## 局部注意力机制(Local attention) [Longformer](model_doc/longformer)使用局部注意力机制:通常情况下,局部上下文(例如,左边和右边的两个词符是什么?)对于给定词符的操作已经足够了。此外,通过堆叠具有小窗口的注意力层,最后一层将拥有不仅仅是窗口内词符的感受野,这使得它们能构建整个句子的表示。 一些预先选定的输入词符也被赋予全局注意力:对于这些少数词符,注意力矩阵可以访问所有词符(tokens),并且这个过程是对称的:所有其他词符除了它们局部窗口内的词符之外,也可以访问这些特定的词符。这在论文的图2d中有展示,下面是一个样本注意力掩码: <div class="flex justify-center"> <img scale="50 %" align="center" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/local_attention_mask.png"/> </div> 使用参数更少的注意力矩阵,可以让模型处理更长的输入序列。 ## 其他技巧 ### 轴向位置编码 [Reformer](model_doc/reformer)模型使用轴向位置编码:在传统的transformer模型中,位置编码矩阵E的大小是\\(l\\)乘以\\(d\\),其中\\(l\\)是序列长度,\\(d\\)是隐藏状态的维度。如果你有非常长的文本,这个矩阵可能会非常大,将会占用大量的GPU显存。为了缓解这个问题,轴向位置编码将这个大矩阵E分解成两个较小的矩阵E1和E2,它们的维度分别是\\(l_{1} \times d_{1}\\) 和\\(l_{2} \times d_{2}\\),满足\\(l_{1} \times l_{2} = l\\)和\\(d_{1} + d_{2} = d\\)(通过长度的乘积,最终得到的矩阵要小得多)。在E中,对于时间步\\(j\\) 的嵌入是通过连接E1中时间步 \\(j \% l1\\) 的嵌入和E2中时间步\\(j // l1\\)的嵌入来获得的。
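下面用一段极简的示意代码(基于 NumPy,仅作说明,矩阵尺寸均为随意假设的取值)演示轴向位置编码的查表方式:对于时间步 j,取 E1 中第 j % l1 行与 E2 中第 j // l1 行拼接,得到维度为 d1 + d2 = d 的位置编码:

```python
import numpy as np

# 假设的尺寸:序列长度 l = l1 * l2,隐藏状态维度 d = d1 + d2
l1, l2 = 128, 512    # 即 l = 65536
d1, d2 = 256, 768    # 即 d = 1024

E1 = np.random.randn(l1, d1)  # 较小的位置编码矩阵 E1
E2 = np.random.randn(l2, d2)  # 较小的位置编码矩阵 E2

def axial_position_embedding(j):
    """返回时间步 j 的位置编码:将 E1 中第 j % l1 行与 E2 中第 j // l1 行拼接起来。"""
    return np.concatenate([E1[j % l1], E2[j // l1]])

print(axial_position_embedding(12345).shape)  # (1024,)
```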
transformers/docs/source/zh/attention.md/0
{ "file_path": "transformers/docs/source/zh/attention.md", "repo_id": "transformers", "token_count": 2385 }
<!---
Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Custom hardware for training

The hardware you use to run model training and inference can have a big effect on performance. For a deep dive into
GPUs, make sure to check out Tim Dettmers' excellent [blog post](https://timdettmers.com/2020/09/07/which-gpu-for-deep-learning/).

Let's have a look at some practical advice for GPU setups.

## GPU

When you train bigger models you have essentially three options:

- bigger GPUs
- more GPUs
- more CPU and NVMe (offloaded to with [DeepSpeed-Infinity](main_classes/deepspeed#nvme-support))

Let's start with the case where you have a single GPU.

### Power and Cooling

If you bought an expensive high-end GPU, make sure you give it the correct power supply and sufficient cooling.

**Power**:

Some high-end consumer GPU cards have 2 and sometimes 3 PCI-E 8-pin power sockets. Make sure you plug as many
independent 12V PCI-E 8-pin cables into the card as there are sockets. Do not use the 2 splits at one end of the same
cable (also known as a pigtail cable). That is, if you have 2 sockets on the GPU, you want 2 separate PCI-E 8-pin
cables going from your PSU to the card, and not one cable that has 2 PCI-E 8-pin connectors at its end! You won't get
the full performance out of your card otherwise.

Each PCI-E 8-pin power cable needs to be plugged into a 12V rail on the PSU side and can supply up to 150W of power.

Some other cards may use a PCI-E 12-pin connector, which can deliver up to 500-600W of power.

Low-end cards may use 6-pin connectors, which supply up to 75W of power.

Additionally, you want a high-end PSU that provides stable voltage. Some lower-quality PSUs may not give the card the
stable voltage it needs to perform at its peak.

And of course, the PSU needs to have enough unused wattage left to power the card.

**Cooling**:

When a GPU overheats, it starts throttling and will not deliver full performance. Sustained high temperatures can
also shorten the GPU's lifespan.

It's hard to pin down the single best temperature when a GPU is under heavy load, but anything under +80C is good,
and lower is better; 70-75C is an excellent range to be in. Throttling is likely to start at around 84-90C. Beyond
throttling performance, prolonged very high temperatures are likely to reduce the lifespan of the GPU.

Next, let's look at one of the most important aspects when you have multiple GPUs: connectivity.

### Multi-GPU Connectivity

If you use multiple GPUs, the way the cards are interconnected can have a huge impact on the total training time. If
the GPUs are on the same physical node, you can run:

```bash
nvidia-smi topo -m
```

It will tell you how the GPUs are interconnected. On a machine with dual GPUs connected with NVLink, you will most
likely see something like:

```
        GPU0    GPU1    CPU Affinity    NUMA Affinity
GPU0     X      NV2     0-23            N/A
GPU1    NV2      X      0-23            N/A
```

On a different machine without NVLink, you may see:

```
        GPU0    GPU1    CPU Affinity    NUMA Affinity
GPU0     X      PHB     0-11            N/A
GPU1    PHB      X      0-11            N/A
```

The report includes this legend:

```
  X    = Self
  SYS  = Connection traversing PCIe as well as the SMP interconnect between NUMA nodes (e.g., QPI/UPI)
  NODE = Connection traversing PCIe as well as the interconnect between PCIe Host Bridges within a NUMA node
  PHB  = Connection traversing PCIe as well as a PCIe Host Bridge (typically the CPU)
  PXB  = Connection traversing multiple PCIe bridges (without traversing the PCIe Host Bridge)
  PIX  = Connection traversing at most a single PCIe bridge
  NV#  = Connection traversing a bonded set of # NVLinks
```

So the first report, `NV2`, tells us the GPUs are interconnected with 2 NVLinks, while the second report, `PHB`, shows
a typical consumer-level PCIe+Bridge setup.

Check what type of connectivity you have in your setup. Some of these types make communication between cards faster
(e.g. NVLink), others slower (e.g. PHB).

Depending on the type of scalability solution used, the connection speed can have a major or a minor impact. If the
GPUs rarely need to synchronize, as in DDP, a slower connection matters less. If the GPUs need to send messages to
each other often, as in ZeRO-DP, then faster connectivity becomes very important for faster training.

#### NVlink

[NVLink](https://en.wikipedia.org/wiki/NVLink) is a wire-based serial multi-lane near-range communications link
developed by Nvidia.

Each new generation provides more bandwidth; for example, here is a quote from the
[Nvidia Ampere GA102 GPU Architecture whitepaper](https://www.nvidia.com/content/dam/en-zz/Solutions/geforce/ampere/pdf/NVIDIA-ampere-GA102-GPU-Architecture-Whitepaper-V1.pdf):

> Third-Generation NVLink®
> GA102 GPUs utilize NVIDIA’s third-generation NVLink interface, which includes four x4 links,
> with each link providing 14.0625 GB/sec bandwidth in each direction between two GPUs. Four
> links provide 56.25 GB/sec bandwidth in each direction, and 112.5 GB/sec total bandwidth
> between two GPUs. Two RTX 3090 GPUs can be connected together for SLI using NVLink.
> (Note that 3-Way and 4-Way SLI configurations are not supported.)

So the higher the `X` in the `NVX` entry of the `nvidia-smi topo -m` output, the better. The generation will depend on
your GPU architecture.

Let's compare the execution of training a gpt2 language model on a small sample of wikitext.

The results are:

| NVlink | Time |
| -----  | ---: |
| Y      | 101s |
| N      | 131s |

You can see that NVLink completes the training about 23% faster. In the second benchmark we use `NCCL_P2P_DISABLE=1`
to tell the GPUs not to use NVLink.

Here is the full benchmark code and outputs:

```bash
# DDP w/ NVLink

rm -r /tmp/test-clm; CUDA_VISIBLE_DEVICES=0,1 torchrun \
--nproc_per_node 2 examples/pytorch/language-modeling/run_clm.py --model_name_or_path openai-community/gpt2 \
--dataset_name wikitext --dataset_config_name wikitext-2-raw-v1 --do_train \
--output_dir /tmp/test-clm --per_device_train_batch_size 4 --max_steps 200

{'train_runtime': 101.9003, 'train_samples_per_second': 1.963, 'epoch': 0.69}

# DDP w/o NVLink

rm -r /tmp/test-clm; CUDA_VISIBLE_DEVICES=0,1 NCCL_P2P_DISABLE=1 torchrun \
--nproc_per_node 2 examples/pytorch/language-modeling/run_clm.py --model_name_or_path openai-community/gpt2 \
--dataset_name wikitext --dataset_config_name wikitext-2-raw-v1 --do_train \
--output_dir /tmp/test-clm --per_device_train_batch_size 4 --max_steps 200

{'train_runtime': 131.4367, 'train_samples_per_second': 1.522, 'epoch': 0.69}
```

Hardware: 2x TITAN RTX 24GB each + NVlink with 2 NVLinks (`NV2` in `nvidia-smi topo -m`)

Software: `pytorch-1.8-to-be` + `cuda-11.0` / `transformers==4.3.0.dev0`
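As a quick programmatic complement to `nvidia-smi topo -m`, the sketch below (assuming PyTorch is installed and at least two GPUs are visible) asks CUDA whether peer-to-peer access is possible between each pair of devices. P2P being available does not by itself prove NVLink (PCIe P2P also counts), so confirm the link type with the `nvidia-smi` report above:

```py
import torch

# Report pairwise peer-to-peer access between visible GPUs.
# This is only a hint about direct GPU-to-GPU connectivity; use
# `nvidia-smi topo -m` to see whether the link is NVLink or PCIe.
n = torch.cuda.device_count()
for i in range(n):
    for j in range(n):
        if i != j:
            ok = torch.cuda.can_device_access_peer(i, j)
            print(f"GPU{i} -> GPU{j}: peer access {'possible' if ok else 'not possible'}")
```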
transformers/docs/source/zh/perf_hardware.md/0
{ "file_path": "transformers/docs/source/zh/perf_hardware.md", "repo_id": "transformers", "token_count": 3946 }
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Tiktoken and interoperability with Transformers

In 🤗 Transformers, tiktoken model files are seamlessly supported when loading models from the Hub with `from_pretrained`: if the model contains a `tokenizer.model` file in tiktoken format, it is automatically converted into our [fast tokenizer](https://huggingface.co/docs/transformers/main/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast).

### Known models released with a `tiktoken.model` file:
	- gpt2
	- llama3

## Example usage

To load `tiktoken` files correctly in transformers, ensure the `tiktoken.model` file is in tiktoken format; it will then be loaded automatically during `from_pretrained`. Here is how to load a tokenizer and a model, which can be loaded from the exact same file:

```py
from transformers import AutoTokenizer

model_id = "meta-llama/Meta-Llama-3-8B-Instruct"
tokenizer = AutoTokenizer.from_pretrained(model_id, subfolder="original")
```

## Create a tiktoken tokenizer

The `tokenizer.model` file contains no information about additional tokens or pattern strings. If these matter, convert the tokenizer to `tokenizer.json`, the format used by the [`PreTrainedTokenizerFast`] class.

Generate the `tokenizer.model` file with [tiktoken.get_encoding](https://github.com/openai/tiktoken/blob/63527649963def8c759b0f91f2eb69a40934e468/tiktoken/registry.py#L63) and then convert it to `tokenizer.json` with the [`convert_tiktoken_to_fast`] function.

```py
from transformers.integrations.tiktoken import convert_tiktoken_to_fast
from tiktoken import get_encoding

# You can load your custom encoding or the one provided by OpenAI
encoding = get_encoding("gpt2")
convert_tiktoken_to_fast(encoding, "config/save/dir")
```

The resulting `tokenizer.json` file is saved to the specified directory and can be loaded with [`PreTrainedTokenizerFast`].

```py
tokenizer = PreTrainedTokenizerFast.from_pretrained("config/save/dir")
```
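As an optional sanity check (not part of the original example), you might compare the converted fast tokenizer against the original tiktoken encoding on a plain string; the save directory below is the one used in the snippet above:

```py
from tiktoken import get_encoding
from transformers import PreTrainedTokenizerFast

# Compare the original tiktoken encoding with the converted fast tokenizer
# (assumes the conversion above was run and saved to "config/save/dir").
encoding = get_encoding("gpt2")
tokenizer = PreTrainedTokenizerFast.from_pretrained("config/save/dir")

text = "Transformers and tiktoken interoperability"
print(encoding.encode(text))
print(tokenizer.encode(text))  # expected to match on ordinary text with no special tokens
```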
transformers/docs/source/zh/tiktoken.md/0
{ "file_path": "transformers/docs/source/zh/tiktoken.md", "repo_id": "transformers", "token_count": 1169 }
#!/usr/bin/env python # coding=utf-8 # Copyright 2021 The HuggingFace Team All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-tuning the library models for masked language modeling (BERT, ALBERT, RoBERTa...) with whole word masking on a text file or a dataset. Here is the full list of checkpoints on the hub that can be fine-tuned by this script: https://huggingface.co/models?filter=fill-mask """ import json import logging import math import os import sys import time from dataclasses import asdict, dataclass, field from enum import Enum from itertools import chain # You can also adapt this script on your own masked language modeling task. Pointers for this are left as comments. from pathlib import Path from typing import Dict, List, Optional, Tuple import flax import jax import jax.numpy as jnp import numpy as np import optax from datasets import load_dataset from flax import jax_utils, traverse_util from flax.jax_utils import pad_shard_unpad from flax.training import train_state from flax.training.common_utils import get_metrics, onehot, shard from huggingface_hub import HfApi from tqdm import tqdm from transformers import ( CONFIG_MAPPING, FLAX_MODEL_FOR_MASKED_LM_MAPPING, AutoConfig, AutoTokenizer, FlaxAutoModelForMaskedLM, HfArgumentParser, PreTrainedTokenizerBase, TensorType, is_tensorboard_available, set_seed, ) from transformers.utils import send_example_telemetry MODEL_CONFIG_CLASSES = list(FLAX_MODEL_FOR_MASKED_LM_MAPPING.keys()) MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class TrainingArguments: output_dir: str = field( metadata={"help": "The output directory where the model predictions and checkpoints will be written."}, ) overwrite_output_dir: bool = field( default=False, metadata={ "help": ( "Overwrite the content of the output directory. " "Use this to continue training if output_dir points to a checkpoint directory." 
) }, ) do_train: bool = field(default=False, metadata={"help": "Whether to run training."}) do_eval: bool = field(default=False, metadata={"help": "Whether to run eval on the dev set."}) per_device_train_batch_size: int = field( default=8, metadata={"help": "Batch size per GPU/TPU core/CPU for training."} ) per_device_eval_batch_size: int = field( default=8, metadata={"help": "Batch size per GPU/TPU core/CPU for evaluation."} ) learning_rate: float = field(default=5e-5, metadata={"help": "The initial learning rate for AdamW."}) weight_decay: float = field(default=0.0, metadata={"help": "Weight decay for AdamW if we apply some."}) adam_beta1: float = field(default=0.9, metadata={"help": "Beta1 for AdamW optimizer"}) adam_beta2: float = field(default=0.999, metadata={"help": "Beta2 for AdamW optimizer"}) adam_epsilon: float = field(default=1e-8, metadata={"help": "Epsilon for AdamW optimizer."}) adafactor: bool = field(default=False, metadata={"help": "Whether or not to replace AdamW by Adafactor."}) num_train_epochs: float = field(default=3.0, metadata={"help": "Total number of training epochs to perform."}) warmup_steps: int = field(default=0, metadata={"help": "Linear warmup over warmup_steps."}) logging_steps: int = field(default=500, metadata={"help": "Log every X updates steps."}) save_steps: int = field(default=500, metadata={"help": "Save checkpoint every X updates steps."}) eval_steps: int = field(default=None, metadata={"help": "Run an evaluation every X steps."}) seed: int = field(default=42, metadata={"help": "Random seed that will be set at the beginning of training."}) push_to_hub: bool = field( default=False, metadata={"help": "Whether or not to upload the trained model to the model hub after training."} ) hub_model_id: str = field( default=None, metadata={"help": "The name of the repository to keep in sync with the local `output_dir`."} ) hub_token: str = field(default=None, metadata={"help": "The token to use to push to the Model Hub."}) gradient_checkpointing: bool = field( default=False, metadata={ "help": "If True, use gradient checkpointing to save memory at the expense of slower backward pass." }, ) def __post_init__(self): if self.output_dir is not None: self.output_dir = os.path.expanduser(self.output_dir) def to_dict(self): """ Serializes this instance while replace `Enum` by their values (for JSON serialization support). It obfuscates the token values by removing their value. """ d = asdict(self) for k, v in d.items(): if isinstance(v, Enum): d[k] = v.value if isinstance(v, list) and len(v) > 0 and isinstance(v[0], Enum): d[k] = [x.value for x in v] if k.endswith("_token"): d[k] = f"<{k.upper()}>" return d @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch. """ model_name_or_path: Optional[str] = field( default=None, metadata={ "help": ( "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch." 
) }, ) model_type: Optional[str] = field( default=None, metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)}, ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} ) use_fast_tokenizer: bool = field( default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, ) dtype: Optional[str] = field( default="float32", metadata={ "help": ( "Floating-point format in which the model weights should be initialized and trained. Choose one of" " `[float32, float16, bfloat16]`." ) }, ) token: str = field( default=None, metadata={ "help": ( "The token to use as HTTP bearer authorization for remote files. If not specified, will use the token " "generated when running `huggingface-cli login` (stored in `~/.huggingface`)." ) }, ) trust_remote_code: bool = field( default=False, metadata={ "help": ( "Whether to trust the execution of code from datasets/models defined on the Hub." " This option should only be set to `True` for repositories you trust and in which you have read the" " code, as it will execute code present on the Hub on your local machine." ) }, ) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. """ dataset_name: Optional[str] = field( default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."}) validation_file: Optional[str] = field( default=None, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, ) train_ref_file: Optional[str] = field( default=None, metadata={"help": "An optional input train ref data file for whole word masking in Chinese."}, ) validation_ref_file: Optional[str] = field( default=None, metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."}, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) validation_split_percentage: Optional[int] = field( default=5, metadata={ "help": "The percentage of the train set used as validation set in case there's no validation split" }, ) max_seq_length: Optional[int] = field( default=None, metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated. Default to the max input length of the model." ) }, ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) mlm_probability: float = field( default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} ) pad_to_max_length: bool = field( default=False, metadata={ "help": ( "Whether to pad all samples to `max_seq_length`. 
" "If False, will pad the samples dynamically when batching to the maximum length in the batch." ) }, ) line_by_line: bool = field( default=False, metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."}, ) def __post_init__(self): if self.dataset_name is None and self.train_file is None and self.validation_file is None: raise ValueError("Need either a dataset name or a training/validation file.") else: if self.train_file is not None: extension = self.train_file.split(".")[-1] assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file." if self.validation_file is not None: extension = self.validation_file.split(".")[-1] assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file." @flax.struct.dataclass class FlaxDataCollatorForLanguageModeling: """ Data collator used for language modeling. Inputs are dynamically padded to the maximum length of a batch if they are not all of the same length. Args: tokenizer (:class:`~transformers.PreTrainedTokenizer` or :class:`~transformers.PreTrainedTokenizerFast`): The tokenizer used for encoding the data. mlm_probability (:obj:`float`, `optional`, defaults to 0.15): The probability with which to (randomly) mask tokens in the input. .. note:: For best performance, this data collator should be used with a dataset having items that are dictionaries or BatchEncoding, with the :obj:`"special_tokens_mask"` key, as returned by a :class:`~transformers.PreTrainedTokenizer` or a :class:`~transformers.PreTrainedTokenizerFast` with the argument :obj:`return_special_tokens_mask=True`. """ tokenizer: PreTrainedTokenizerBase mlm_probability: float = 0.15 def __post_init__(self): if self.tokenizer.mask_token is None: raise ValueError( "This tokenizer does not have a mask token which is necessary for masked language modeling. " "You should pass `mlm=False` to train on causal language modeling instead." ) def __call__(self, examples: List[Dict[str, np.ndarray]], pad_to_multiple_of: int) -> Dict[str, np.ndarray]: # Handle dict or lists with proper padding and conversion to tensor. batch = self.tokenizer.pad(examples, pad_to_multiple_of=pad_to_multiple_of, return_tensors=TensorType.NUMPY) # If special token mask has been preprocessed, pop it from the dict. special_tokens_mask = batch.pop("special_tokens_mask", None) batch["input_ids"], batch["labels"] = self.mask_tokens( batch["input_ids"], special_tokens_mask=special_tokens_mask ) return batch def mask_tokens( self, inputs: np.ndarray, special_tokens_mask: Optional[np.ndarray] ) -> Tuple[np.ndarray, np.ndarray]: """ Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. 
""" labels = inputs.copy() # We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`) probability_matrix = np.full(labels.shape, self.mlm_probability) special_tokens_mask = special_tokens_mask.astype("bool") probability_matrix[special_tokens_mask] = 0.0 masked_indices = np.random.binomial(1, probability_matrix).astype("bool") labels[~masked_indices] = -100 # We only compute loss on masked tokens # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK]) indices_replaced = np.random.binomial(1, np.full(labels.shape, 0.8)).astype("bool") & masked_indices inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token) # 10% of the time, we replace masked input tokens with random word indices_random = np.random.binomial(1, np.full(labels.shape, 0.5)).astype("bool") indices_random &= masked_indices & ~indices_replaced random_words = np.random.randint(self.tokenizer.vocab_size, size=labels.shape, dtype="i4") inputs[indices_random] = random_words[indices_random] # The rest of the time (10% of the time) we keep the masked input tokens unchanged return inputs, labels def generate_batch_splits(samples_idx: np.ndarray, batch_size: int, drop_last=True) -> np.ndarray: """Generate batches of data for a specified batch size from sample indices. If the dataset size is not divisible by the batch size and `drop_last` is `True`, the last incomplete batch is dropped. Else, it is returned.""" num_samples = len(samples_idx) if drop_last: samples_to_remove = num_samples % batch_size if samples_to_remove != 0: samples_idx = samples_idx[:-samples_to_remove] sections_split = num_samples // batch_size samples_idx = samples_idx.reshape((sections_split, batch_size)) else: sections_split = math.ceil(num_samples / batch_size) samples_idx = np.array_split(samples_idx, sections_split) return samples_idx def write_train_metric(summary_writer, train_metrics, train_time, step): summary_writer.scalar("train_time", train_time, step) train_metrics = get_metrics(train_metrics) for key, vals in train_metrics.items(): tag = f"train_{key}" for i, val in enumerate(vals): summary_writer.scalar(tag, val, step - len(vals) + i + 1) def write_eval_metric(summary_writer, eval_metrics, step): for metric_name, value in eval_metrics.items(): summary_writer.scalar(f"eval_{metric_name}", value, step) def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. 
send_example_telemetry("run_mlm", model_args, data_args, framework="flax") if ( os.path.exists(training_args.output_dir) and os.listdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", level=logging.INFO, datefmt="[%X]", ) # Log on each process the small summary: logger = logging.getLogger(__name__) # Set the verbosity to info of the Transformers logger (on main process only): logger.info(f"Training/evaluation parameters {training_args}") # Set seed before initializing model. set_seed(training_args.seed) # Handle the repository creation if training_args.push_to_hub: # Retrieve of infer repo_name repo_name = training_args.hub_model_id if repo_name is None: repo_name = Path(training_args.output_dir).absolute().name # Create repo and retrieve repo_id api = HfApi() repo_id = api.create_repo(repo_name, exist_ok=True, token=training_args.hub_token).repo_id # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # # In distributed training, the load_dataset function guarantees that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. 
datasets = load_dataset( data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, token=model_args.token, num_proc=data_args.preprocessing_num_workers, trust_remote_code=model_args.trust_remote_code, ) if "validation" not in datasets.keys(): datasets["validation"] = load_dataset( data_args.dataset_name, data_args.dataset_config_name, split=f"train[:{data_args.validation_split_percentage}%]", cache_dir=model_args.cache_dir, token=model_args.token, num_proc=data_args.preprocessing_num_workers, trust_remote_code=model_args.trust_remote_code, ) datasets["train"] = load_dataset( data_args.dataset_name, data_args.dataset_config_name, split=f"train[{data_args.validation_split_percentage}%:]", cache_dir=model_args.cache_dir, token=model_args.token, num_proc=data_args.preprocessing_num_workers, trust_remote_code=model_args.trust_remote_code, ) else: data_files = {} if data_args.train_file is not None: data_files["train"] = data_args.train_file extension = data_args.train_file.split(".")[-1] if data_args.validation_file is not None: data_files["validation"] = data_args.validation_file extension = data_args.validation_file.split(".")[-1] if extension == "txt": extension = "text" datasets = load_dataset( extension, data_files=data_files, cache_dir=model_args.cache_dir, token=model_args.token, num_proc=data_args.preprocessing_num_workers, ) if "validation" not in datasets.keys(): datasets["validation"] = load_dataset( extension, data_files=data_files, split=f"train[:{data_args.validation_split_percentage}%]", cache_dir=model_args.cache_dir, token=model_args.token, num_proc=data_args.preprocessing_num_workers, ) datasets["train"] = load_dataset( extension, data_files=data_files, split=f"train[{data_args.validation_split_percentage}%:]", cache_dir=model_args.cache_dir, token=model_args.token, num_proc=data_args.preprocessing_num_workers, ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets. # Load pretrained model and tokenizer # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. if model_args.config_name: config = AutoConfig.from_pretrained( model_args.config_name, cache_dir=model_args.cache_dir, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) elif model_args.model_name_or_path: config = AutoConfig.from_pretrained( model_args.model_name_or_path, cache_dir=model_args.cache_dir, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) else: config = CONFIG_MAPPING[model_args.model_type]() logger.warning("You are instantiating a new config instance from scratch.") if model_args.tokenizer_name: tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) elif model_args.model_name_or_path: tokenizer = AutoTokenizer.from_pretrained( model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) else: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script. " "You can do it from another script, save it, and load it from here, using --tokenizer_name." ) # Preprocessing the datasets. # First we tokenize all the texts. 
if training_args.do_train: column_names = datasets["train"].column_names else: column_names = datasets["validation"].column_names text_column_name = "text" if "text" in column_names else column_names[0] max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length) if data_args.line_by_line: # When using line_by_line, we just tokenize each nonempty line. padding = "max_length" if data_args.pad_to_max_length else False def tokenize_function(examples): # Remove empty lines examples = [line for line in examples if len(line) > 0 and not line.isspace()] return tokenizer( examples, return_special_tokens_mask=True, padding=padding, truncation=True, max_length=max_seq_length, ) tokenized_datasets = datasets.map( tokenize_function, input_columns=[text_column_name], batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, ) else: # Otherwise, we tokenize every text, then concatenate them together before splitting them in smaller parts. # We use `return_special_tokens_mask=True` because DataCollatorForLanguageModeling (see below) is more # efficient when it receives the `special_tokens_mask`. def tokenize_function(examples): return tokenizer(examples[text_column_name], return_special_tokens_mask=True) tokenized_datasets = datasets.map( tokenize_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, ) # Main data processing function that will concatenate all texts from our dataset and generate chunks of # max_seq_length. def group_texts(examples): # Concatenate all texts. concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()} total_length = len(concatenated_examples[list(examples.keys())[0]]) # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can # customize this part to your needs. if total_length >= max_seq_length: total_length = (total_length // max_seq_length) * max_seq_length # Split by chunks of max_len. result = { k: [t[i : i + max_seq_length] for i in range(0, total_length, max_seq_length)] for k, t in concatenated_examples.items() } return result # Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a # remainder for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value # might be slower to preprocess. # # To speed up this part, we use multiprocessing. See the documentation of the map method for more information: # https://huggingface.co/docs/datasets/process#map tokenized_datasets = tokenized_datasets.map( group_texts, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, ) # Enable tensorboard only on the master node has_tensorboard = is_tensorboard_available() if has_tensorboard and jax.process_index() == 0: try: from flax.metrics.tensorboard import SummaryWriter summary_writer = SummaryWriter(log_dir=Path(training_args.output_dir)) except ImportError as ie: has_tensorboard = False logger.warning( f"Unable to display metrics through TensorBoard because some package are not installed: {ie}" ) else: logger.warning( "Unable to display metrics through TensorBoard because the package is not installed: " "Please run pip install tensorboard to enable." ) # Data collator # This one will take care of randomly masking the tokens. 
data_collator = FlaxDataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability) # Initialize our training rng = jax.random.PRNGKey(training_args.seed) dropout_rngs = jax.random.split(rng, jax.local_device_count()) if model_args.model_name_or_path: model = FlaxAutoModelForMaskedLM.from_pretrained( model_args.model_name_or_path, config=config, seed=training_args.seed, dtype=getattr(jnp, model_args.dtype), token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) else: model = FlaxAutoModelForMaskedLM.from_config( config, seed=training_args.seed, dtype=getattr(jnp, model_args.dtype), trust_remote_code=model_args.trust_remote_code, ) if training_args.gradient_checkpointing: model.enable_gradient_checkpointing() # Store some constant num_epochs = int(training_args.num_train_epochs) train_batch_size = int(training_args.per_device_train_batch_size) * jax.device_count() per_device_eval_batch_size = int(training_args.per_device_eval_batch_size) eval_batch_size = per_device_eval_batch_size * jax.device_count() num_train_steps = len(tokenized_datasets["train"]) // train_batch_size * num_epochs # Create learning rate schedule warmup_fn = optax.linear_schedule( init_value=0.0, end_value=training_args.learning_rate, transition_steps=training_args.warmup_steps ) decay_fn = optax.linear_schedule( init_value=training_args.learning_rate, end_value=0, transition_steps=num_train_steps - training_args.warmup_steps, ) linear_decay_lr_schedule_fn = optax.join_schedules( schedules=[warmup_fn, decay_fn], boundaries=[training_args.warmup_steps] ) # We use Optax's "masking" functionality to not apply weight decay # to bias and LayerNorm scale parameters. decay_mask_fn returns a # mask boolean with the same structure as the parameters. # The mask is True for parameters that should be decayed. 
def decay_mask_fn(params): flat_params = traverse_util.flatten_dict(params) # find out all LayerNorm parameters layer_norm_candidates = ["layernorm", "layer_norm", "ln"] layer_norm_named_params = { layer[-2:] for layer_norm_name in layer_norm_candidates for layer in flat_params.keys() if layer_norm_name in "".join(layer).lower() } flat_mask = {path: (path[-1] != "bias" and path[-2:] not in layer_norm_named_params) for path in flat_params} return traverse_util.unflatten_dict(flat_mask) # create adam optimizer if training_args.adafactor: # We use the default parameters here to initialize adafactor, # For more details about the parameters please check https://github.com/deepmind/optax/blob/ed02befef9bf81cbbf236be3d2b0e032e9ed4a40/optax/_src/alias.py#L74 optimizer = optax.adafactor( learning_rate=linear_decay_lr_schedule_fn, ) else: optimizer = optax.adamw( learning_rate=linear_decay_lr_schedule_fn, b1=training_args.adam_beta1, b2=training_args.adam_beta2, eps=training_args.adam_epsilon, weight_decay=training_args.weight_decay, mask=decay_mask_fn, ) # Setup train state state = train_state.TrainState.create(apply_fn=model.__call__, params=model.params, tx=optimizer) # Define gradient update step fn def train_step(state, batch, dropout_rng): dropout_rng, new_dropout_rng = jax.random.split(dropout_rng) def loss_fn(params): labels = batch.pop("labels") logits = state.apply_fn(**batch, params=params, dropout_rng=dropout_rng, train=True)[0] # compute loss, ignore padded input tokens label_mask = jnp.where(labels > 0, 1.0, 0.0) loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])) * label_mask # take average loss = loss.sum() num_labels = label_mask.sum() return loss, num_labels grad_fn = jax.value_and_grad(loss_fn, has_aux=True) (loss, num_labels), grad = grad_fn(state.params) num_labels = jax.lax.psum(num_labels, "batch") # true loss = total loss / total samples loss = jax.lax.psum(loss, "batch") loss = jax.tree_util.tree_map(lambda x: x / num_labels, loss) # true grad = total grad / total samples grad = jax.lax.psum(grad, "batch") grad = jax.tree_util.tree_map(lambda x: x / num_labels, grad) new_state = state.apply_gradients(grads=grad) metrics = {"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(state.step)} return new_state, metrics, new_dropout_rng # Create parallel version of the train step p_train_step = jax.pmap(train_step, "batch", donate_argnums=(0,)) # Define eval fn def eval_step(params, batch): labels = batch.pop("labels") logits = model(**batch, params=params, train=False)[0] # compute loss, ignore padded input tokens label_mask = jnp.where(labels > 0, 1.0, 0.0) loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])) * label_mask # compute accuracy accuracy = jnp.equal(jnp.argmax(logits, axis=-1), labels) * label_mask # summarize metrics metrics = {"loss": loss.sum(), "accuracy": accuracy.sum(), "normalizer": label_mask.sum()} metrics = jax.lax.psum(metrics, axis_name="batch") return metrics p_eval_step = jax.pmap(eval_step, "batch", donate_argnums=(0,)) # Replicate the train state on each device state = jax_utils.replicate(state) train_time = 0 epochs = tqdm(range(num_epochs), desc=f"Epoch ... 
(1/{num_epochs})", position=0) for epoch in epochs: # ======================== Training ================================ train_start = time.time() train_metrics = [] # Create sampling rng rng, input_rng = jax.random.split(rng) # Generate an epoch by shuffling sampling indices from the train dataset num_train_samples = len(tokenized_datasets["train"]) # Avoid using jax.numpy here in case of TPU training train_samples_idx = np.random.permutation(np.arange(num_train_samples)) train_batch_idx = generate_batch_splits(train_samples_idx, train_batch_size) # Gather the indexes for creating the batch and do a training step for step, batch_idx in enumerate(tqdm(train_batch_idx, desc="Training...", position=1)): samples = [tokenized_datasets["train"][int(idx)] for idx in batch_idx] model_inputs = data_collator(samples, pad_to_multiple_of=16) # Model forward model_inputs = shard(model_inputs.data) state, train_metric, dropout_rngs = p_train_step(state, model_inputs, dropout_rngs) train_metrics.append(train_metric) cur_step = epoch * (num_train_samples // train_batch_size) + step if cur_step % training_args.logging_steps == 0 and cur_step > 0: # Save metrics train_metric = jax_utils.unreplicate(train_metric) train_time += time.time() - train_start if has_tensorboard and jax.process_index() == 0: write_train_metric(summary_writer, train_metrics, train_time, cur_step) epochs.write( f"Step... ({cur_step} | Loss: {train_metric['loss']}, Learning Rate:" f" {train_metric['learning_rate']})" ) train_metrics = [] if cur_step % training_args.eval_steps == 0 and cur_step > 0: # ======================== Evaluating ============================== num_eval_samples = len(tokenized_datasets["validation"]) # Avoid using jax.numpy here in case of TPU training eval_samples_idx = np.arange(num_eval_samples) eval_batch_idx = generate_batch_splits(eval_samples_idx, eval_batch_size, drop_last=False) eval_metrics = [] for i, batch_idx in enumerate(tqdm(eval_batch_idx, desc="Evaluating ...", position=2)): samples = [tokenized_datasets["validation"][int(idx)] for idx in batch_idx] model_inputs = data_collator(samples, pad_to_multiple_of=16) # Model forward metrics = pad_shard_unpad(p_eval_step, static_return=True)( state.params, model_inputs.data, min_device_batch=per_device_eval_batch_size ) eval_metrics.append(metrics) # normalize eval metrics eval_metrics = get_metrics(eval_metrics) eval_metrics = jax.tree_util.tree_map(jnp.sum, eval_metrics) eval_normalizer = eval_metrics.pop("normalizer") eval_metrics = jax.tree_util.tree_map(lambda x: x / eval_normalizer, eval_metrics) # Update progress bar epochs.desc = f"Step... 
({cur_step} | Loss: {eval_metrics['loss']}, Acc: {eval_metrics['accuracy']})" # Save metrics if has_tensorboard and jax.process_index() == 0: write_eval_metric(summary_writer, eval_metrics, cur_step) if cur_step % training_args.save_steps == 0 and cur_step > 0: # save checkpoint after each epoch and push checkpoint to the hub if jax.process_index() == 0: params = jax.device_get(jax.tree_util.tree_map(lambda x: x[0], state.params)) model.save_pretrained(training_args.output_dir, params=params) tokenizer.save_pretrained(training_args.output_dir) if training_args.push_to_hub: api.upload_folder( commit_message=f"Saving weights and logs of step {cur_step}", folder_path=training_args.output_dir, repo_id=repo_id, repo_type="model", token=training_args.hub_token, ) # Eval after training if training_args.do_eval: num_eval_samples = len(tokenized_datasets["validation"]) # Avoid using jax.numpy here in case of TPU training eval_samples_idx = np.arange(num_eval_samples) eval_batch_idx = generate_batch_splits(eval_samples_idx, eval_batch_size, drop_last=False) eval_metrics = [] for _, batch_idx in enumerate(tqdm(eval_batch_idx, desc="Evaluating ...", position=2)): samples = [tokenized_datasets["validation"][int(idx)] for idx in batch_idx] model_inputs = data_collator(samples, pad_to_multiple_of=16) # Model forward metrics = pad_shard_unpad(p_eval_step, static_return=True)( state.params, model_inputs.data, min_device_batch=per_device_eval_batch_size ) eval_metrics.append(metrics) # normalize eval metrics eval_metrics = get_metrics(eval_metrics) eval_metrics = jax.tree_util.tree_map(lambda metric: jnp.sum(metric).item(), eval_metrics) eval_normalizer = eval_metrics.pop("normalizer") eval_metrics = jax.tree_util.tree_map(lambda x: x / eval_normalizer, eval_metrics) try: perplexity = math.exp(eval_metrics["loss"]) except OverflowError: perplexity = float("inf") eval_metrics["perplexity"] = perplexity if jax.process_index() == 0: eval_metrics = {f"eval_{metric_name}": value for metric_name, value in eval_metrics.items()} path = os.path.join(training_args.output_dir, "eval_results.json") with open(path, "w") as f: json.dump(eval_metrics, f, indent=4, sort_keys=True) if __name__ == "__main__": main()
transformers/examples/flax/language-modeling/run_mlm_flax.py/0
{ "file_path": "transformers/examples/flax/language-modeling/run_mlm_flax.py", "repo_id": "transformers", "token_count": 17055 }
#!/usr/bin/env python # coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Finetuning a 🤗 Flax Transformers model for sequence classification on GLUE.""" import json import logging import math import os import random import sys import time import warnings from dataclasses import dataclass, field from pathlib import Path from typing import Any, Callable, Dict, Optional, Tuple import datasets import evaluate import jax import jax.numpy as jnp import numpy as np import optax from datasets import load_dataset from flax import struct, traverse_util from flax.jax_utils import pad_shard_unpad, replicate, unreplicate from flax.training import train_state from flax.training.common_utils import get_metrics, onehot, shard from huggingface_hub import HfApi from tqdm import tqdm import transformers from transformers import ( AutoConfig, AutoTokenizer, FlaxAutoModelForSequenceClassification, HfArgumentParser, PretrainedConfig, TrainingArguments, is_tensorboard_available, ) from transformers.utils import check_min_version, send_example_telemetry logger = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.49.0.dev0") Array = Any Dataset = datasets.arrow_dataset.Dataset PRNGKey = Any task_to_keys = { "cola": ("sentence", None), "mnli": ("premise", "hypothesis"), "mrpc": ("sentence1", "sentence2"), "qnli": ("question", "sentence"), "qqp": ("question1", "question2"), "rte": ("sentence1", "sentence2"), "sst2": ("sentence", None), "stsb": ("sentence1", "sentence2"), "wnli": ("sentence1", "sentence2"), } @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) use_slow_tokenizer: Optional[bool] = field( default=False, metadata={"help": "If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library)."}, ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) token: str = field( default=None, metadata={ "help": ( "The token to use as HTTP bearer authorization for remote files. If not specified, will use the token " "generated when running `huggingface-cli login` (stored in `~/.huggingface`)." ) }, ) use_auth_token: bool = field( default=None, metadata={ "help": "The `use_auth_token` argument is deprecated and will be removed in v4.34. 
Please use `token` instead." }, ) trust_remote_code: bool = field( default=False, metadata={ "help": ( "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option " "should only be set to `True` for repositories you trust and in which you have read the code, as it will " "execute code present on the Hub on your local machine." ) }, ) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. """ task_name: Optional[str] = field( default=None, metadata={"help": f"The name of the glue task to train on. choices {list(task_to_keys.keys())}"} ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) train_file: Optional[str] = field( default=None, metadata={"help": "The input training data file (a csv or JSON file)."} ) validation_file: Optional[str] = field( default=None, metadata={"help": "An optional input evaluation data file to evaluate on (a csv or JSON file)."}, ) test_file: Optional[str] = field( default=None, metadata={"help": "An optional input test data file to predict on (a csv or JSON file)."}, ) text_column_name: Optional[str] = field( default=None, metadata={"help": "The column name of text to input in the file (a csv or JSON file)."} ) label_column_name: Optional[str] = field( default=None, metadata={"help": "The column name of label to input in the file (a csv or JSON file)."} ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) max_seq_length: int = field( default=None, metadata={ "help": ( "The maximum total input sequence length after tokenization. If set, sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) }, ) max_eval_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) }, ) max_predict_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of prediction examples to this " "value if set." ) }, ) def __post_init__(self): if self.task_name is None and self.train_file is None and self.validation_file is None: raise ValueError("Need either a dataset name or a training/validation file.") else: if self.train_file is not None: extension = self.train_file.split(".")[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if self.validation_file is not None: extension = self.validation_file.split(".")[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." self.task_name = self.task_name.lower() if isinstance(self.task_name, str) else self.task_name def create_train_state( model: FlaxAutoModelForSequenceClassification, learning_rate_fn: Callable[[int], float], is_regression: bool, num_labels: int, weight_decay: float, ) -> train_state.TrainState: """Create initial training state.""" class TrainState(train_state.TrainState): """Train state with an Optax optimizer. 
The two functions below differ depending on whether the task is classification or regression. Args: logits_fn: Applied to last layer to obtain the logits. loss_fn: Function to compute the loss. """ logits_fn: Callable = struct.field(pytree_node=False) loss_fn: Callable = struct.field(pytree_node=False) # We use Optax's "masking" functionality to not apply weight decay # to bias and LayerNorm scale parameters. decay_mask_fn returns a # mask boolean with the same structure as the parameters. # The mask is True for parameters that should be decayed. def decay_mask_fn(params): flat_params = traverse_util.flatten_dict(params) # find out all LayerNorm parameters layer_norm_candidates = ["layernorm", "layer_norm", "ln"] layer_norm_named_params = { layer[-2:] for layer_norm_name in layer_norm_candidates for layer in flat_params.keys() if layer_norm_name in "".join(layer).lower() } flat_mask = {path: (path[-1] != "bias" and path[-2:] not in layer_norm_named_params) for path in flat_params} return traverse_util.unflatten_dict(flat_mask) tx = optax.adamw( learning_rate=learning_rate_fn, b1=0.9, b2=0.999, eps=1e-6, weight_decay=weight_decay, mask=decay_mask_fn ) if is_regression: def mse_loss(logits, labels): return jnp.mean((logits[..., 0] - labels) ** 2) return TrainState.create( apply_fn=model.__call__, params=model.params, tx=tx, logits_fn=lambda logits: logits[..., 0], loss_fn=mse_loss, ) else: # Classification. def cross_entropy_loss(logits, labels): xentropy = optax.softmax_cross_entropy(logits, onehot(labels, num_classes=num_labels)) return jnp.mean(xentropy) return TrainState.create( apply_fn=model.__call__, params=model.params, tx=tx, logits_fn=lambda logits: logits.argmax(-1), loss_fn=cross_entropy_loss, ) def create_learning_rate_fn( train_ds_size: int, train_batch_size: int, num_train_epochs: int, num_warmup_steps: int, learning_rate: float ) -> Callable[[int], jnp.ndarray]: """Returns a linear warmup, linear_decay learning rate function.""" steps_per_epoch = train_ds_size // train_batch_size num_train_steps = steps_per_epoch * num_train_epochs warmup_fn = optax.linear_schedule(init_value=0.0, end_value=learning_rate, transition_steps=num_warmup_steps) decay_fn = optax.linear_schedule( init_value=learning_rate, end_value=0, transition_steps=num_train_steps - num_warmup_steps ) schedule_fn = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[num_warmup_steps]) return schedule_fn def glue_train_data_collator(rng: PRNGKey, dataset: Dataset, batch_size: int): """Returns shuffled batches of size `batch_size` from truncated `train dataset`, sharded over all local devices.""" steps_per_epoch = len(dataset) // batch_size perms = jax.random.permutation(rng, len(dataset)) perms = perms[: steps_per_epoch * batch_size] # Skip incomplete batch. perms = perms.reshape((steps_per_epoch, batch_size)) for perm in perms: batch = dataset[perm] batch = {k: np.array(v) for k, v in batch.items()} batch = shard(batch) yield batch def glue_eval_data_collator(dataset: Dataset, batch_size: int): """Returns batches of size `batch_size` from `eval dataset`. 
Sharding handled by `pad_shard_unpad` in the eval loop.""" batch_idx = np.arange(len(dataset)) steps_per_epoch = math.ceil(len(dataset) / batch_size) batch_idx = np.array_split(batch_idx, steps_per_epoch) for idx in batch_idx: batch = dataset[idx] batch = {k: np.array(v) for k, v in batch.items()} yield batch def main(): parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() if model_args.use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token` instead.", FutureWarning, ) if model_args.token is not None: raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.") model_args.token = model_args.use_auth_token # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_glue", model_args, data_args, framework="flax") # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) # Setup logging, we only want one process per machine to log things on the screen. logger.setLevel(logging.INFO if jax.process_index() == 0 else logging.ERROR) if jax.process_index() == 0: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # Handle the repository creation if training_args.push_to_hub: # Retrieve of infer repo_name repo_name = training_args.hub_model_id if repo_name is None: repo_name = Path(training_args.output_dir).absolute().name # Create repo and retrieve repo_id api = HfApi() repo_id = api.create_repo(repo_name, exist_ok=True, token=training_args.hub_token).repo_id # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub). # For CSV/JSON files, this script will use as labels the column called 'label' and as pair of sentences the # sentences in columns called 'sentence1' and 'sentence2' if such column exists or the first two columns not named # label if at least two columns are provided. # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this # single column. You can easily tweak this behavior (see below) # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.task_name is not None: # Downloading and loading a dataset from the hub. raw_datasets = load_dataset( "glue", data_args.task_name, token=model_args.token, ) else: # Loading the dataset from local csv or json file. 
data_files = {} if data_args.train_file is not None: data_files["train"] = data_args.train_file if data_args.validation_file is not None: data_files["validation"] = data_args.validation_file extension = (data_args.train_file if data_args.train_file is not None else data_args.valid_file).split(".")[-1] raw_datasets = load_dataset( extension, data_files=data_files, token=model_args.token, ) # See more about loading any type of standard or custom dataset at # https://huggingface.co/docs/datasets/loading_datasets. # Labels if data_args.task_name is not None: is_regression = data_args.task_name == "stsb" if not is_regression: label_list = raw_datasets["train"].features["label"].names num_labels = len(label_list) else: num_labels = 1 else: # Trying to have good defaults here, don't hesitate to tweak to your needs. is_regression = raw_datasets["train"].features["label"].dtype in ["float32", "float64"] if is_regression: num_labels = 1 else: # A useful fast method: # https://huggingface.co/docs/datasets/package_reference/main_classes#datasets.Dataset.unique label_list = raw_datasets["train"].unique("label") label_list.sort() # Let's sort it for determinism num_labels = len(label_list) # Load pretrained model and tokenizer config = AutoConfig.from_pretrained( model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) tokenizer = AutoTokenizer.from_pretrained( model_args.model_name_or_path, use_fast=not model_args.use_slow_tokenizer, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) model = FlaxAutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path, config=config, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) # Preprocessing the datasets if data_args.task_name is not None: sentence1_key, sentence2_key = task_to_keys[data_args.task_name] else: # Again, we try to have some nice defaults but don't hesitate to tweak to your use case. non_label_column_names = [name for name in raw_datasets["train"].column_names if name != "label"] if "sentence1" in non_label_column_names and "sentence2" in non_label_column_names: sentence1_key, sentence2_key = "sentence1", "sentence2" else: if len(non_label_column_names) >= 2: sentence1_key, sentence2_key = non_label_column_names[:2] else: sentence1_key, sentence2_key = non_label_column_names[0], None # Some models have set the order of the labels to use, so let's make sure we do use it. label_to_id = None if ( model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id and data_args.task_name is not None and not is_regression ): # Some have all caps in their config, some don't. label_name_to_id = {k.lower(): v for k, v in model.config.label2id.items()} if sorted(label_name_to_id.keys()) == sorted(label_list): logger.info( f"The configuration of the model provided the following label correspondence: {label_name_to_id}. " "Using it!" ) label_to_id = {i: label_name_to_id[label_list[i]] for i in range(num_labels)} else: logger.warning( "Your model seems to have been trained with labels, but they don't match the dataset: " f"model labels: {sorted(label_name_to_id.keys())}, dataset labels: {sorted(label_list)}." 
"\nIgnoring the model labels as a result.", ) elif data_args.task_name is None: label_to_id = {v: i for i, v in enumerate(label_list)} def preprocess_function(examples): # Tokenize the texts texts = ( (examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key]) ) result = tokenizer(*texts, padding="max_length", max_length=data_args.max_seq_length, truncation=True) if "label" in examples: if label_to_id is not None: # Map labels to IDs (not necessary for GLUE tasks) result["labels"] = [label_to_id[l] for l in examples["label"]] else: # In all cases, rename the column to labels because the model will expect that. result["labels"] = examples["label"] return result processed_datasets = raw_datasets.map( preprocess_function, batched=True, remove_columns=raw_datasets["train"].column_names ) train_dataset = processed_datasets["train"] eval_dataset = processed_datasets["validation_matched" if data_args.task_name == "mnli" else "validation"] # Log a few random samples from the training set: for index in random.sample(range(len(train_dataset)), 3): logger.info(f"Sample {index} of the training set: {train_dataset[index]}.") # Define a summary writer has_tensorboard = is_tensorboard_available() if has_tensorboard and jax.process_index() == 0: try: from flax.metrics.tensorboard import SummaryWriter summary_writer = SummaryWriter(training_args.output_dir) summary_writer.hparams({**training_args.to_dict(), **vars(model_args), **vars(data_args)}) except ImportError as ie: has_tensorboard = False logger.warning( f"Unable to display metrics through TensorBoard because some package are not installed: {ie}" ) else: logger.warning( "Unable to display metrics through TensorBoard because the package is not installed: " "Please run pip install tensorboard to enable." 
) def write_train_metric(summary_writer, train_metrics, train_time, step): summary_writer.scalar("train_time", train_time, step) train_metrics = get_metrics(train_metrics) for key, vals in train_metrics.items(): tag = f"train_{key}" for i, val in enumerate(vals): summary_writer.scalar(tag, val, step - len(vals) + i + 1) def write_eval_metric(summary_writer, eval_metrics, step): for metric_name, value in eval_metrics.items(): summary_writer.scalar(f"eval_{metric_name}", value, step) num_epochs = int(training_args.num_train_epochs) rng = jax.random.PRNGKey(training_args.seed) dropout_rngs = jax.random.split(rng, jax.local_device_count()) train_batch_size = int(training_args.per_device_train_batch_size) * jax.local_device_count() per_device_eval_batch_size = int(training_args.per_device_eval_batch_size) eval_batch_size = per_device_eval_batch_size * jax.device_count() learning_rate_fn = create_learning_rate_fn( len(train_dataset), train_batch_size, training_args.num_train_epochs, training_args.warmup_steps, training_args.learning_rate, ) state = create_train_state( model, learning_rate_fn, is_regression, num_labels=num_labels, weight_decay=training_args.weight_decay ) # define step functions def train_step( state: train_state.TrainState, batch: Dict[str, Array], dropout_rng: PRNGKey ) -> Tuple[train_state.TrainState, float]: """Trains model with an optimizer (both in `state`) on `batch`, returning a pair `(new_state, loss)`.""" dropout_rng, new_dropout_rng = jax.random.split(dropout_rng) targets = batch.pop("labels") def loss_fn(params): logits = state.apply_fn(**batch, params=params, dropout_rng=dropout_rng, train=True)[0] loss = state.loss_fn(logits, targets) return loss grad_fn = jax.value_and_grad(loss_fn) loss, grad = grad_fn(state.params) grad = jax.lax.pmean(grad, "batch") new_state = state.apply_gradients(grads=grad) metrics = jax.lax.pmean({"loss": loss, "learning_rate": learning_rate_fn(state.step)}, axis_name="batch") return new_state, metrics, new_dropout_rng p_train_step = jax.pmap(train_step, axis_name="batch", donate_argnums=(0,)) def eval_step(state, batch): logits = state.apply_fn(**batch, params=state.params, train=False)[0] return state.logits_fn(logits) p_eval_step = jax.pmap(eval_step, axis_name="batch") if data_args.task_name is not None: metric = evaluate.load("glue", data_args.task_name, cache_dir=model_args.cache_dir) else: metric = evaluate.load("accuracy", cache_dir=model_args.cache_dir) logger.info(f"===== Starting training ({num_epochs} epochs) =====") train_time = 0 # make sure weights are replicated on each device state = replicate(state) steps_per_epoch = len(train_dataset) // train_batch_size total_steps = steps_per_epoch * num_epochs epochs = tqdm(range(num_epochs), desc=f"Epoch ... 
(0/{num_epochs})", position=0) for epoch in epochs: train_start = time.time() train_metrics = [] # Create sampling rng rng, input_rng = jax.random.split(rng) # train train_loader = glue_train_data_collator(input_rng, train_dataset, train_batch_size) for step, batch in enumerate( tqdm( train_loader, total=steps_per_epoch, desc="Training...", position=1, ), ): state, train_metric, dropout_rngs = p_train_step(state, batch, dropout_rngs) train_metrics.append(train_metric) cur_step = (epoch * steps_per_epoch) + (step + 1) if cur_step % training_args.logging_steps == 0 and cur_step > 0: # Save metrics train_metric = unreplicate(train_metric) train_time += time.time() - train_start if has_tensorboard and jax.process_index() == 0: write_train_metric(summary_writer, train_metrics, train_time, cur_step) epochs.write( f"Step... ({cur_step}/{total_steps} | Training Loss: {train_metric['loss']}, Learning Rate:" f" {train_metric['learning_rate']})" ) train_metrics = [] if (cur_step % training_args.eval_steps == 0 or cur_step % steps_per_epoch == 0) and cur_step > 0: # evaluate eval_loader = glue_eval_data_collator(eval_dataset, eval_batch_size) for batch in tqdm( eval_loader, total=math.ceil(len(eval_dataset) / eval_batch_size), desc="Evaluating ...", position=2, ): labels = batch.pop("labels") predictions = pad_shard_unpad(p_eval_step)( state, batch, min_device_batch=per_device_eval_batch_size ) metric.add_batch(predictions=np.array(predictions), references=labels) eval_metric = metric.compute() logger.info(f"Step... ({cur_step}/{total_steps} | Eval metrics: {eval_metric})") if has_tensorboard and jax.process_index() == 0: write_eval_metric(summary_writer, eval_metric, cur_step) if (cur_step % training_args.save_steps == 0 and cur_step > 0) or (cur_step == total_steps): # save checkpoint after each epoch and push checkpoint to the hub if jax.process_index() == 0: params = jax.device_get(unreplicate(state.params)) model.save_pretrained(training_args.output_dir, params=params) tokenizer.save_pretrained(training_args.output_dir) if training_args.push_to_hub: api.upload_folder( commit_message=f"Saving weights and logs of epoch {epoch}", folder_path=training_args.output_dir, repo_id=repo_id, repo_type="model", token=training_args.hub_token, ) epochs.desc = f"Epoch ... {epoch + 1}/{num_epochs}" # save the eval metrics in json if jax.process_index() == 0: eval_metric = {f"eval_{metric_name}": value for metric_name, value in eval_metric.items()} path = os.path.join(training_args.output_dir, "eval_results.json") with open(path, "w") as f: json.dump(eval_metric, f, indent=4, sort_keys=True) if __name__ == "__main__": main()
transformers/examples/flax/text-classification/run_flax_glue.py/0
{ "file_path": "transformers/examples/flax/text-classification/run_flax_glue.py", "repo_id": "transformers", "token_count": 12138 }
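The Flax GLUE script above keeps the last, smaller eval batch and relies on `pad_shard_unpad` around the `pmap`-ed eval step so that batch can still be split across devices. Below is a minimal NumPy-only sketch of that pad/shard/unpad idea — it is an illustration of the concept, not the actual Flax utility (whose signature and behaviour may differ):

```python
import numpy as np

def pad_shard_unpad_sketch(step_fn, batch, n_devices):
    """Pad a batch to a multiple of n_devices, reshape to (devices, per_device, ...),
    run step_fn, then drop the padded rows again."""
    batch_size = len(next(iter(batch.values())))
    remainder = (-batch_size) % n_devices  # rows of padding needed
    padded = {k: np.concatenate([v, np.repeat(v[-1:], remainder, axis=0)]) for k, v in batch.items()}
    sharded = {k: v.reshape(n_devices, -1, *v.shape[1:]) for k, v in padded.items()}
    out = step_fn(sharded)                 # shape: (devices, per_device, ...)
    out = out.reshape(-1, *out.shape[2:])  # merge device/batch dims back
    return out[:batch_size]                # drop the padding rows

# Toy check: the "predictions" are just the input ids summed per example.
batch = {"input_ids": np.arange(10).reshape(5, 2)}
preds = pad_shard_unpad_sketch(lambda b: b["input_ids"].sum(-1, keepdims=True), batch, n_devices=4)
print(preds.shape)  # (5, 1) — the three padded rows were discarded again
```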
import argparse import glob import logging import os import time from argparse import Namespace import numpy as np import torch from lightning_base import BaseTransformer, add_generic_args, generic_train from torch.utils.data import DataLoader, TensorDataset from transformers import glue_compute_metrics as compute_metrics from transformers import glue_convert_examples_to_features as convert_examples_to_features from transformers import glue_output_modes, glue_tasks_num_labels from transformers import glue_processors as processors logger = logging.getLogger(__name__) class GLUETransformer(BaseTransformer): mode = "sequence-classification" def __init__(self, hparams): if isinstance(hparams, dict): hparams = Namespace(**hparams) hparams.glue_output_mode = glue_output_modes[hparams.task] num_labels = glue_tasks_num_labels[hparams.task] super().__init__(hparams, num_labels, self.mode) def forward(self, **inputs): return self.model(**inputs) def training_step(self, batch, batch_idx): inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if self.config.model_type not in ["distilbert", "bart"]: inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None outputs = self(**inputs) loss = outputs[0] lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"] tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]} return {"loss": loss, "log": tensorboard_logs} def prepare_data(self): "Called to initialize data. Use the call to construct features" args = self.hparams processor = processors[args.task]() self.labels = processor.get_labels() for mode in ["train", "dev"]: cached_features_file = self._feature_file(mode) if os.path.exists(cached_features_file) and not args.overwrite_cache: logger.info("Loading features from cached file %s", cached_features_file) else: logger.info("Creating features from dataset file at %s", args.data_dir) examples = ( processor.get_dev_examples(args.data_dir) if mode == "dev" else processor.get_train_examples(args.data_dir) ) features = convert_examples_to_features( examples, self.tokenizer, max_length=args.max_seq_length, label_list=self.labels, output_mode=args.glue_output_mode, ) logger.info("Saving features into cached file %s", cached_features_file) torch.save(features, cached_features_file) def get_dataloader(self, mode: str, batch_size: int, shuffle: bool = False) -> DataLoader: "Load datasets. Called after prepare data." 
# We test on dev set to compare to benchmarks without having to submit to GLUE server mode = "dev" if mode == "test" else mode cached_features_file = self._feature_file(mode) logger.info("Loading features from cached file %s", cached_features_file) features = torch.load(cached_features_file) all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long) all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long) if self.hparams.glue_output_mode == "classification": all_labels = torch.tensor([f.label for f in features], dtype=torch.long) elif self.hparams.glue_output_mode == "regression": all_labels = torch.tensor([f.label for f in features], dtype=torch.float) return DataLoader( TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels), batch_size=batch_size, shuffle=shuffle, ) def validation_step(self, batch, batch_idx): inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if self.config.model_type not in ["distilbert", "bart"]: inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None outputs = self(**inputs) tmp_eval_loss, logits = outputs[:2] preds = logits.detach().cpu().numpy() out_label_ids = inputs["labels"].detach().cpu().numpy() return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids} def _eval_end(self, outputs) -> tuple: val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item() preds = np.concatenate([x["pred"] for x in outputs], axis=0) if self.hparams.glue_output_mode == "classification": preds = np.argmax(preds, axis=1) elif self.hparams.glue_output_mode == "regression": preds = np.squeeze(preds) out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0) out_label_list = [[] for _ in range(out_label_ids.shape[0])] preds_list = [[] for _ in range(out_label_ids.shape[0])] results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)} ret = dict(results.items()) ret["log"] = results return ret, preds_list, out_label_list def validation_epoch_end(self, outputs: list) -> dict: ret, preds, targets = self._eval_end(outputs) logs = ret["log"] return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs} def test_epoch_end(self, outputs) -> dict: ret, predictions, targets = self._eval_end(outputs) logs = ret["log"] # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss` return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs} @staticmethod def add_model_specific_args(parser, root_dir): BaseTransformer.add_model_specific_args(parser, root_dir) parser.add_argument( "--max_seq_length", default=128, type=int, help=( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." 
), ) parser.add_argument( "--task", default="", type=str, required=True, help="The GLUE task to run", ) parser.add_argument( "--gpus", default=0, type=int, help="The number of GPUs allocated for this, it is by default 0 meaning none", ) parser.add_argument( "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets" ) return parser def main(): parser = argparse.ArgumentParser() add_generic_args(parser, os.getcwd()) parser = GLUETransformer.add_model_specific_args(parser, os.getcwd()) args = parser.parse_args() # If output_dir not provided, a folder will be generated in pwd if args.output_dir is None: args.output_dir = os.path.join( "./results", f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}", ) os.makedirs(args.output_dir) model = GLUETransformer(args) trainer = generic_train(model, args) # Optionally, predict on dev set and write to output_dir if args.do_predict: checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True)) model = model.load_from_checkpoint(checkpoints[-1]) return trainer.test(model) if __name__ == "__main__": main()
transformers/examples/legacy/pytorch-lightning/run_glue.py/0
{ "file_path": "transformers/examples/legacy/pytorch-lightning/run_glue.py", "repo_id": "transformers", "token_count": 3476 }
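The Lightning module above collects per-batch outputs in `validation_step` and aggregates them in `_eval_end` before computing the GLUE metrics. A small self-contained sketch of that aggregation logic, with made-up batch outputs and plain accuracy standing in for `glue_compute_metrics`:

```python
import numpy as np
import torch

# Pretend these came out of validation_step for two batches.
outputs = [
    {"val_loss": torch.tensor(0.41), "pred": np.array([[2.0, 0.1], [0.3, 1.2]]), "target": np.array([0, 1])},
    {"val_loss": torch.tensor(0.35), "pred": np.array([[0.2, 0.9]]), "target": np.array([0])},
]

val_loss_mean = torch.stack([o["val_loss"] for o in outputs]).mean().item()
preds = np.concatenate([o["pred"] for o in outputs], axis=0)
preds = np.argmax(preds, axis=1)  # classification head -> predicted label ids
targets = np.concatenate([o["target"] for o in outputs], axis=0)

accuracy = (preds == targets).mean()  # stand-in for the task-specific GLUE metric
print({"val_loss": round(val_loss_mean, 4), "acc": accuracy})
```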
#!/usr/bin/env python # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Union import fire import torch from tqdm import tqdm def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None: """Convert a pytorch_model.bin or model.pt file to torch.float16 for faster downloads, less disk space.""" state_dict = torch.load(src_path, map_location=map_location) for k, v in tqdm(state_dict.items()): if not isinstance(v, torch.Tensor): raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin") state_dict[k] = v.half() if save_path is None: # overwrite src_path save_path = src_path torch.save(state_dict, save_path) if __name__ == "__main__": fire.Fire(convert)
transformers/examples/legacy/seq2seq/convert_model_to_fp16.py/0
{ "file_path": "transformers/examples/legacy/seq2seq/convert_model_to_fp16.py", "repo_id": "transformers", "token_count": 450 }
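The fp16 converter above is normally driven through `fire` on the command line, but the `convert` function can also be called directly. A hedged usage sketch with a throwaway state dict — the module import assumes the script sits on your Python path, and the file names are purely illustrative:

```python
import torch
from convert_model_to_fp16 import convert  # assumes the script is importable from the working directory

# Create a tiny fp32 "checkpoint" to convert.
state_dict = {"weight": torch.randn(4, 4), "bias": torch.zeros(4)}
torch.save(state_dict, "tiny_model.bin")

# Halve the weights; without save_path the source file would be overwritten instead.
convert("tiny_model.bin", save_path="tiny_model_fp16.bin")

halved = torch.load("tiny_model_fp16.bin")
print(halved["weight"].dtype)  # torch.float16
```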
#!/usr/bin/env python # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import shutil import time from json import JSONDecodeError from logging import getLogger from pathlib import Path from typing import Dict, List import torch from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoModelForSeq2SeqLM, AutoTokenizer from utils import ( Seq2SeqDataset, calculate_bleu, calculate_rouge, chunks, lmap, load_json, parse_numeric_n_bool_cl_kwargs, save_json, use_task_specific_params, write_txt_file, ) logger = getLogger(__name__) def eval_data_dir( data_dir, save_dir: str, model_name: str, bs: int = 8, max_source_length: int = 1024, type_path="val", n_obs=None, fp16=False, task="summarization", local_rank=None, num_return_sequences=1, dataset_kwargs: Dict = None, prefix="", **generate_kwargs, ) -> Dict: """Run evaluation on part of the data for one gpu and save to {save_dir}/rank_{rank}_output.json""" model_name = str(model_name) assert local_rank is not None torch.distributed.init_process_group(backend="nccl", rank=local_rank) save_dir = Path(save_dir) save_path = save_dir.joinpath(f"rank_{local_rank}_output.json") torch.cuda.set_device(local_rank) model = AutoModelForSeq2SeqLM.from_pretrained(model_name).cuda() if fp16: model = model.half() # determine if we need to increase num_beams use_task_specific_params(model, task) # update config with task specific params num_beams = generate_kwargs.pop("num_beams", model.config.num_beams) # AttributeError risk? if num_return_sequences > num_beams: num_beams = num_return_sequences tokenizer = AutoTokenizer.from_pretrained(model_name) logger.info(f"Inferred tokenizer type: {tokenizer.__class__}") # if this is wrong, check config.model_type. if max_source_length is None: max_source_length = tokenizer.model_max_length if prefix is None: prefix = prefix or getattr(model.config, "prefix", "") or "" ds = Seq2SeqDataset( tokenizer, data_dir, max_source_length, max_target_length=1024, type_path=type_path, n_obs=n_obs, prefix=prefix, **dataset_kwargs, ) # I set shuffle=True for a more accurate progress bar. # If all the longest samples are first, the prog bar estimate is too high at the beginning. 
sampler = ds.make_sortish_sampler(bs, distributed=True, add_extra_examples=False, shuffle=True) data_loader = DataLoader(ds, sampler=sampler, batch_size=bs, collate_fn=ds.collate_fn) results = [] for batch in tqdm(data_loader): summaries = model.generate( input_ids=batch["input_ids"].to(model.device), attention_mask=batch["attention_mask"].to(model.device), num_return_sequences=num_return_sequences, num_beams=num_beams, **generate_kwargs, ) preds = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False) ids = batch["ids"] if num_return_sequences > 1: preds = chunks(preds, num_return_sequences) # batch size chunks, each of size num_return_seq for i, pred in enumerate(preds): results.append({"pred": pred, "id": ids[i].item()}) save_json(results, save_path) return results, sampler.num_replicas def run_generate(): parser = argparse.ArgumentParser( epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate" ) parser.add_argument("--data_dir", type=str, help="like cnn_dm/test.source") parser.add_argument( "--model_name", type=str, help="like facebook/bart-large-cnn,google-t5/t5-base, etc.", default="sshleifer/distilbart-xsum-12-3", ) parser.add_argument("--save_dir", type=str, help="where to save", default="tmp_gen") parser.add_argument("--max_source_length", type=int, default=None) parser.add_argument( "--type_path", type=str, default="test", help="which subset to evaluate typically train/val/test" ) parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics") parser.add_argument("--bs", type=int, default=8, required=False, help="batch size") parser.add_argument( "--local_rank", type=int, default=-1, required=False, help="should be passed by distributed.launch" ) parser.add_argument( "--n_obs", type=int, default=None, required=False, help="How many observations. Defaults to all." ) parser.add_argument( "--num_return_sequences", type=int, default=1, required=False, help="How many sequences to return" ) parser.add_argument( "--sync_timeout", type=int, default=600, required=False, help="How long should master process wait for other processes to finish.", ) parser.add_argument("--src_lang", type=str, default=None, required=False) parser.add_argument("--tgt_lang", type=str, default=None, required=False) parser.add_argument( "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples" ) parser.add_argument("--fp16", action="store_true") parser.add_argument("--debug", action="store_true") start_time = time.time() args, rest = parser.parse_known_args() generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest) if generate_kwargs and args.local_rank <= 0: print(f"parsed the following generate kwargs: {generate_kwargs}") json_save_dir = Path(args.save_dir + "_tmp") Path(json_save_dir).mkdir(exist_ok=True) # this handles locking. intermediate_files = list(json_save_dir.glob("rank_*.json")) if intermediate_files: raise ValueError(f"Found files at {json_save_dir} please move or remove them.") # In theory, a node could finish and save before another node hits this. If this happens, we can address later. 
dataset_kwargs = {} if args.src_lang is not None: dataset_kwargs["src_lang"] = args.src_lang if args.tgt_lang is not None: dataset_kwargs["tgt_lang"] = args.tgt_lang Path(args.save_dir).mkdir(exist_ok=True) results, num_replicas = eval_data_dir( args.data_dir, json_save_dir, args.model_name, type_path=args.type_path, bs=args.bs, fp16=args.fp16, task=args.task, local_rank=args.local_rank, n_obs=args.n_obs, max_source_length=args.max_source_length, num_return_sequences=args.num_return_sequences, prefix=args.prefix, dataset_kwargs=dataset_kwargs, **generate_kwargs, ) if args.local_rank <= 0: save_dir = Path(args.save_dir) save_dir.mkdir(exist_ok=True) partial_results = gather_results_from_each_node(num_replicas, json_save_dir, args.sync_timeout) preds = combine_partial_results(partial_results) if args.num_return_sequences > 1: save_path = save_dir.joinpath("pseudolabel_results.json") print(f"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/") save_json(preds, save_path) return tgt_file = Path(args.data_dir).joinpath(args.type_path + ".target") with open(tgt_file) as f: labels = [x.rstrip() for x in f.readlines()][: len(preds)] # Calculate metrics, save metrics, and save _generations.txt calc_bleu = "translation" in args.task score_fn = calculate_bleu if calc_bleu else calculate_rouge metric_name = "bleu" if calc_bleu else "rouge" metrics: Dict = score_fn(preds, labels) metrics["n_obs"] = len(preds) runtime = time.time() - start_time metrics["seconds_per_sample"] = round(runtime / metrics["n_obs"], 4) metrics["n_gpus"] = num_replicas # TODO(@stas00): add whatever metadata to metrics metrics_save_path = save_dir.joinpath(f"{args.type_path}_{metric_name}.json") save_json(metrics, metrics_save_path, indent=None) print(metrics) write_txt_file(preds, save_dir.joinpath(f"{args.type_path}_generations.txt")) if args.debug: write_txt_file(labels, save_dir.joinpath(f"{args.type_path}.target")) else: shutil.rmtree(json_save_dir) def combine_partial_results(partial_results) -> List: """Concatenate partial results into one file, then sort it by id.""" records = [] for partial_result in partial_results: records.extend(partial_result) records = sorted(records, key=lambda x: x["id"]) preds = [x["pred"] for x in records] return preds def gather_results_from_each_node(num_replicas, save_dir, timeout) -> List[Dict[str, List]]: # WAIT FOR lots of .json files start_wait = time.time() logger.info("waiting for all nodes to finish") json_data = None while (time.time() - start_wait) < timeout: json_files = list(save_dir.glob("rank_*.json")) if len(json_files) < num_replicas: continue try: # make sure all json files are fully saved json_data = lmap(load_json, json_files) return json_data except JSONDecodeError: continue else: raise TimeoutError("Rank 0 gave up on waiting for other processes") # Unreachable if __name__ == "__main__": # Usage for MT: run_generate()
transformers/examples/legacy/seq2seq/run_distributed_eval.py/0
{ "file_path": "transformers/examples/legacy/seq2seq/run_distributed_eval.py", "repo_id": "transformers", "token_count": 4164 }
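After every rank has written its `rank_*.json`, the driver process merges the partial files and restores dataset order by example id. The logic is the same as `combine_partial_results` in the script above, shown here on toy data:

```python
# Toy stand-ins for two ranks' JSON payloads (each entry: generated text plus the example id).
rank0 = [{"pred": "a cat", "id": 2}, {"pred": "a dog", "id": 0}]
rank1 = [{"pred": "a bird", "id": 1}]

def combine_partial_results(partial_results):
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    records = sorted(records, key=lambda x: x["id"])  # restore dataset order
    return [x["pred"] for x in records]

print(combine_partial_results([rank0, rank1]))  # ['a dog', 'a bird', 'a cat']
```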
# Note that llama and cohere have different definitions for rotate_half from transformers.models.cohere.modeling_cohere import rotate_half # noqa from transformers.models.llama.modeling_llama import LlamaAttention # When following LlamaAttention dependencies, we will grab the function `rotate_half` defined # in `modeling_llama.py`. But here we imported it explicitly from Cohere, so it should use Cohere's # definition instead class SwitchFunctionAttention(LlamaAttention): pass
transformers/examples/modular-transformers/modular_switch_function.py/0
{ "file_path": "transformers/examples/modular-transformers/modular_switch_function.py", "repo_id": "transformers", "token_count": 133 }
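The comment in that modular file hinges on Llama and Cohere implementing `rotate_half` differently: Llama rotates by splitting the head dimension in halves, while Cohere uses an interleaved (even/odd channel) rotary convention. Roughly, and simplified from the modeling code — treat the exact definitions below as an approximation rather than a verbatim copy:

```python
import torch

def rotate_half_llama(x):
    # Split-in-half convention.
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)

def rotate_half_cohere(x):
    # Interleaved (even/odd channel) convention.
    x1 = x[..., ::2]
    x2 = x[..., 1::2]
    return torch.stack((-x2, x1), dim=-1).flatten(-2)

x = torch.arange(4.0)
print(rotate_half_llama(x))   # tensor([-2., -3.,  0.,  1.])
print(rotate_half_cohere(x))  # tensor([-1.,  0., -3.,  2.])
```

The two functions give different results on the same tensor, which is exactly why importing `rotate_half` from the Cohere module changes the behaviour of the generated attention class.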
#!/usr/bin/env python # coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and import logging import os import sys from dataclasses import dataclass, field from typing import Optional import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor from torchvision.transforms.functional import InterpolationMode import transformers from transformers import ( HfArgumentParser, Trainer, TrainingArguments, ViTImageProcessor, ViTMAEConfig, ViTMAEForPreTraining, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version """ Pre-training a 🤗 ViT model as an MAE (masked autoencoder), as proposed in https://arxiv.org/abs/2111.06377.""" logger = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.49.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt") @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify them on the command line. """ dataset_name: Optional[str] = field( default="cifar10", metadata={"help": "Name of a dataset from the datasets package"} ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) trust_remote_code: bool = field( default=False, metadata={ "help": ( "Whether to trust the execution of code from datasets/models defined on the Hub." " This option should only be set to `True` for repositories you trust and in which you have read the" " code, as it will execute code present on the Hub on your local machine." ) }, ) image_column_name: Optional[str] = field( default=None, metadata={"help": "The column name of the images in the files."} ) train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."}) validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."}) train_val_split: Optional[float] = field( default=0.15, metadata={"help": "Percent to split off of train for validation."} ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) }, ) max_eval_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." 
) }, ) def __post_init__(self): data_files = {} if self.train_dir is not None: data_files["train"] = self.train_dir if self.validation_dir is not None: data_files["val"] = self.validation_dir self.data_files = data_files if data_files else None @dataclass class ModelArguments: """ Arguments pertaining to which model/config/image processor we are going to pre-train. """ model_name_or_path: str = field( default=None, metadata={ "help": ( "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch." ) }, ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"} ) config_overrides: Optional[str] = field( default=None, metadata={ "help": ( "Override some existing default config settings when a model is trained from scratch. Example: " "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index" ) }, ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."}) token: str = field( default=None, metadata={ "help": ( "The token to use as HTTP bearer authorization for remote files. If not specified, will use the token " "generated when running `huggingface-cli login` (stored in `~/.huggingface`)." ) }, ) mask_ratio: float = field( default=0.75, metadata={"help": "The ratio of the number of masked tokens in the input sequence."} ) norm_pix_loss: bool = field( default=True, metadata={"help": "Whether or not to train with normalized pixel values as target."} ) @dataclass class CustomTrainingArguments(TrainingArguments): base_learning_rate: float = field( default=1e-3, metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."} ) def collate_fn(examples): pixel_values = torch.stack([example["pixel_values"] for example in examples]) return {"pixel_values": pixel_values} def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_mae", model_args, data_args) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. 
transformers.utils.logging.set_verbosity_info() log_level = training_args.get_process_log_level() logger.setLevel(log_level) transformers.utils.logging.set_verbosity(log_level) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, " + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}" ) logger.info(f"Training/evaluation parameters {training_args}") # Detecting last checkpoint. last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Initialize our dataset. ds = load_dataset( data_args.dataset_name, data_args.dataset_config_name, data_files=data_args.data_files, cache_dir=model_args.cache_dir, token=model_args.token, trust_remote_code=data_args.trust_remote_code, ) # If we don't have a validation split, split off a percentage of train as validation. data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0: split = ds["train"].train_test_split(data_args.train_val_split) ds["train"] = split["train"] ds["validation"] = split["test"] # Load pretrained model and image processor # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
config_kwargs = { "cache_dir": model_args.cache_dir, "revision": model_args.model_revision, "token": model_args.token, } if model_args.config_name: config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs) elif model_args.model_name_or_path: config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs) else: config = ViTMAEConfig() logger.warning("You are instantiating a new config instance from scratch.") if model_args.config_overrides is not None: logger.info(f"Overriding config: {model_args.config_overrides}") config.update_from_string(model_args.config_overrides) logger.info(f"New config: {config}") # adapt config config.update( { "mask_ratio": model_args.mask_ratio, "norm_pix_loss": model_args.norm_pix_loss, } ) # create image processor if model_args.image_processor_name: image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs) elif model_args.model_name_or_path: image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs) else: image_processor = ViTImageProcessor() # create model if model_args.model_name_or_path: model = ViTMAEForPreTraining.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=model_args.token, ) else: logger.info("Training new model from scratch") model = ViTMAEForPreTraining(config) if training_args.do_train: column_names = ds["train"].column_names else: column_names = ds["validation"].column_names if data_args.image_column_name is not None: image_column_name = data_args.image_column_name elif "image" in column_names: image_column_name = "image" elif "img" in column_names: image_column_name = "img" else: image_column_name = column_names[0] # transformations as done in original MAE paper # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py if "shortest_edge" in image_processor.size: size = image_processor.size["shortest_edge"] else: size = (image_processor.size["height"], image_processor.size["width"]) transforms = Compose( [ Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img), RandomResizedCrop(size, scale=(0.2, 1.0), interpolation=InterpolationMode.BICUBIC), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean, std=image_processor.image_std), ] ) def preprocess_images(examples): """Preprocess a batch of images by applying transforms.""" examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]] return examples if training_args.do_train: if "train" not in ds: raise ValueError("--do_train requires a train dataset") if data_args.max_train_samples is not None: ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples)) # Set the training transforms ds["train"].set_transform(preprocess_images) if training_args.do_eval: if "validation" not in ds: raise ValueError("--do_eval requires a validation dataset") if data_args.max_eval_samples is not None: ds["validation"] = ( ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples)) ) # Set the validation transforms ds["validation"].set_transform(preprocess_images) # Compute absolute learning rate total_train_batch_size = ( training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size ) if training_args.base_learning_rate is not None: 
training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256 # Initialize our trainer trainer = Trainer( model=model, args=training_args, train_dataset=ds["train"] if training_args.do_train else None, eval_dataset=ds["validation"] if training_args.do_eval else None, processing_class=image_processor, data_collator=collate_fn, ) # Training if training_args.do_train: checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint elif last_checkpoint is not None: checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() trainer.log_metrics("train", train_result.metrics) trainer.save_metrics("train", train_result.metrics) trainer.save_state() # Evaluation if training_args.do_eval: metrics = trainer.evaluate() trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) # Write model card and (optionally) push to hub kwargs = { "tasks": "masked-auto-encoding", "dataset": data_args.dataset_name, "tags": ["masked-auto-encoding"], } if training_args.push_to_hub: trainer.push_to_hub(**kwargs) else: trainer.create_model_card(**kwargs) def _mp_fn(index): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
transformers/examples/pytorch/image-pretraining/run_mae.py/0
{ "file_path": "transformers/examples/pytorch/image-pretraining/run_mae.py", "repo_id": "transformers", "token_count": 6332 }
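The `--base_learning_rate` argument in the MAE script follows the convention `absolute_lr = base_lr * total_batch_size / 256`, with the total batch size built from the per-device batch size, gradient accumulation, and world size. A quick worked example of the arithmetic the script performs (the batch-size numbers are illustrative):

```python
base_learning_rate = 1e-3           # default in the script
per_device_train_batch_size = 64    # illustrative values
gradient_accumulation_steps = 2
world_size = 8                      # e.g. 8 GPUs

total_train_batch_size = per_device_train_batch_size * gradient_accumulation_steps * world_size
learning_rate = base_learning_rate * total_train_batch_size / 256
print(total_train_batch_size, learning_rate)  # 1024 0.004
```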
<!---
Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->

# Multiple Choice

## Fine-tuning on SWAG with the Trainer

`run_swag` allows you to fine-tune any model from our [hub](https://huggingface.co/models) (as long as its architecture has a `ForMultipleChoice` version in the library) on the SWAG dataset or your own csv/jsonlines files, as long as they are structured the same way. To make it work on another dataset, you will need to tweak the `preprocess_function` inside the script.

```bash
python examples/multiple-choice/run_swag.py \
--model_name_or_path FacebookAI/roberta-base \
--do_train \
--do_eval \
--learning_rate 5e-5 \
--num_train_epochs 3 \
--output_dir /tmp/swag_base \
--per_device_eval_batch_size=16 \
--per_device_train_batch_size=16 \
--overwrite_output
```

Training with the defined hyper-parameters yields the following results:

```
***** Eval results *****
eval_acc = 0.8338998300509847
eval_loss = 0.44457291918821606
```

## With Accelerate

Based on the script [run_swag_no_trainer.py](https://github.com/huggingface/transformers/blob/main/examples/pytorch/multiple-choice/run_swag_no_trainer.py).

Like `run_swag.py`, this script allows you to fine-tune any of the models on the [hub](https://huggingface.co/models) (as long as its architecture has a `ForMultipleChoice` version in the library) on the SWAG dataset or your own data in a csv or a JSON file. The main difference is that this script exposes the bare training loop, to allow you to quickly experiment and add any customization you would like.

It offers fewer options than the script with `Trainer` (but you can easily change the options for the optimizer or the dataloaders directly in the script), but can still be run in a distributed setup, on TPU, and supports mixed precision by means of the [🤗 `Accelerate`](https://github.com/huggingface/accelerate) library. You can use the script normally after installing it:

```bash
pip install git+https://github.com/huggingface/accelerate
```

then

```bash
export DATASET_NAME=swag

python run_swag_no_trainer.py \
  --model_name_or_path google-bert/bert-base-cased \
  --dataset_name $DATASET_NAME \
  --max_seq_length 128 \
  --per_device_train_batch_size 32 \
  --learning_rate 2e-5 \
  --num_train_epochs 3 \
  --output_dir /tmp/$DATASET_NAME/
```

You can then use your usual launchers to run it in a distributed environment, but the easiest way is to run

```bash
accelerate config
```

and reply to the questions asked. Then

```bash
accelerate test
```

which will check that everything is ready for training.

Finally, you can launch training with

```bash
export DATASET_NAME=swag

accelerate launch run_swag_no_trainer.py \
  --model_name_or_path google-bert/bert-base-cased \
  --dataset_name $DATASET_NAME \
  --max_seq_length 128 \
  --per_device_train_batch_size 32 \
  --learning_rate 2e-5 \
  --num_train_epochs 3 \
  --output_dir /tmp/$DATASET_NAME/
```

This command is the same and will work for:

- a CPU-only setup
- a setup with one GPU
- distributed training with several GPUs (single or multi node)
- training on TPUs

Note that this library is in alpha release, so your feedback is more than welcome if you encounter any problem using it.
transformers/examples/pytorch/multiple-choice/README.md/0
{ "file_path": "transformers/examples/pytorch/multiple-choice/README.md", "repo_id": "transformers", "token_count": 1170 }
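The README above notes that adapting `run_swag.py` to another dataset mostly comes down to tweaking `preprocess_function`. As a hedged sketch of what that function typically looks like for a SWAG-style multiple-choice dataset — the column names (`sent1`, `sent2`, `ending0..3`) are the SWAG ones and the details are an approximation of the script, not a verbatim copy — each example is expanded into four (context, candidate ending) pairs, tokenized, and regrouped:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-cased")
ending_names = [f"ending{i}" for i in range(4)]

def preprocess_function(examples):
    # Repeat the first sentence four times so each candidate ending gets its own pair.
    first_sentences = [[context] * 4 for context in examples["sent1"]]
    second_sentences = [
        [f"{header} {examples[end][i]}" for end in ending_names]
        for i, header in enumerate(examples["sent2"])
    ]
    # Flatten, tokenize, then un-flatten back into groups of 4 choices per example.
    first_sentences = sum(first_sentences, [])
    second_sentences = sum(second_sentences, [])
    tokenized = tokenizer(first_sentences, second_sentences, truncation=True, max_length=128)
    return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized.items()}
```

For your own data, swap in the column that holds the shared context and the columns that hold the candidate answers; the rest of the function stays the same.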
#!/usr/bin/env python # coding=utf-8 # Copyright 2021 The HuggingFace Team All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-tuning the library's seq2seq models for question answering using the 🤗 Seq2SeqTrainer. """ # You can also adapt this script on your own question answering task. Pointers for this are left as comments. import logging import os import sys from dataclasses import dataclass, field from typing import List, Optional, Tuple import datasets import evaluate import numpy as np from datasets import load_dataset from trainer_seq2seq_qa import QuestionAnsweringSeq2SeqTrainer import transformers from transformers import ( AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer, DataCollatorForSeq2Seq, HfArgumentParser, Seq2SeqTrainingArguments, set_seed, ) from transformers.trainer_utils import EvalLoopOutput, EvalPrediction, get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.49.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/question-answering/requirements.txt") logger = logging.getLogger(__name__) @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Path to directory to store the pretrained models downloaded from huggingface.co"}, ) use_fast_tokenizer: bool = field( default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) token: str = field( default=None, metadata={ "help": ( "The token to use as HTTP bearer authorization for remote files. If not specified, will use the token " "generated when running `huggingface-cli login` (stored in `~/.huggingface`)." ) }, ) trust_remote_code: bool = field( default=False, metadata={ "help": ( "Whether to trust the execution of code from datasets/models defined on the Hub." " This option should only be set to `True` for repositories you trust and in which you have read the" " code, as it will execute code present on the Hub on your local machine." ) }, ) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. 
""" dataset_name: Optional[str] = field( default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) context_column: Optional[str] = field( default="context", metadata={"help": "The name of the column in the datasets containing the contexts (for question answering)."}, ) question_column: Optional[str] = field( default="question", metadata={"help": "The name of the column in the datasets containing the questions (for question answering)."}, ) answer_column: Optional[str] = field( default="answers", metadata={"help": "The name of the column in the datasets containing the answers (for question answering)."}, ) train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."}) validation_file: Optional[str] = field( default=None, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, ) test_file: Optional[str] = field( default=None, metadata={"help": "An optional input test data file to evaluate the perplexity on (a text file)."}, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) max_seq_length: int = field( default=384, metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) max_answer_length: int = field( default=30, metadata={ "help": ( "The maximum length of an answer that can be generated. This is needed because the start " "and end predictions are not conditioned on one another." ) }, ) val_max_answer_length: Optional[int] = field( default=None, metadata={ "help": ( "The maximum total sequence length for validation target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded. Will default to `max_answer_length`. " "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used " "during ``evaluate`` and ``predict``." ) }, ) pad_to_max_length: bool = field( default=True, metadata={ "help": ( "Whether to pad all samples to `max_seq_length`. If False, will pad the samples dynamically when" " batching to the maximum length in the batch (which can be faster on GPU but will be slower on TPU)." ) }, ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) }, ) max_eval_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) }, ) max_predict_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of prediction examples to this " "value if set." 
) }, ) version_2_with_negative: bool = field( default=False, metadata={"help": "If true, some of the examples do not have an answer."} ) null_score_diff_threshold: float = field( default=0.0, metadata={ "help": ( "The threshold used to select the null answer: if the best answer has a score that is less than " "the score of the null answer minus this threshold, the null answer is selected for this example. " "Only useful when `version_2_with_negative=True`." ) }, ) doc_stride: int = field( default=128, metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."}, ) n_best_size: int = field( default=20, metadata={"help": "The total number of n-best predictions to generate when looking for an answer."}, ) num_beams: Optional[int] = field( default=None, metadata={ "help": ( "Number of beams to use for evaluation. This argument will be passed to ``model.generate``, " "which is used during ``evaluate`` and ``predict``." ) }, ) ignore_pad_token_for_loss: bool = field( default=True, metadata={ "help": "Whether to ignore the tokens corresponding to padded labels in the loss computation or not." }, ) def __post_init__(self): if ( self.dataset_name is None and self.train_file is None and self.validation_file is None and self.test_file is None ): raise ValueError("Need either a dataset name or a training/validation file/test_file.") else: if self.train_file is not None: extension = self.train_file.split(".")[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if self.validation_file is not None: extension = self.validation_file.split(".")[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." if self.test_file is not None: extension = self.test_file.split(".")[-1] assert extension in ["csv", "json"], "`test_file` should be a csv or a json file." if self.val_max_answer_length is None: self.val_max_answer_length = self.max_answer_length question_answering_column_name_mapping = { "squad_v2": ("question", "context", "answer"), } def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_seq2seq_qa", model_args, data_args) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. 
transformers.utils.logging.set_verbosity_info() log_level = training_args.get_process_log_level() logger.setLevel(log_level) datasets.utils.logging.set_verbosity(log_level) transformers.utils.logging.set_verbosity(log_level) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, " + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}" ) logger.info(f"Training/evaluation parameters {training_args}") # Detecting last checkpoint. last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. set_seed(training_args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. raw_datasets = load_dataset( data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) else: data_files = {} if data_args.train_file is not None: data_files["train"] = data_args.train_file extension = data_args.train_file.split(".")[-1] if data_args.validation_file is not None: data_files["validation"] = data_args.validation_file extension = data_args.validation_file.split(".")[-1] if data_args.test_file is not None: data_files["test"] = data_args.test_file extension = data_args.test_file.split(".")[-1] raw_datasets = load_dataset( extension, data_files=data_files, field="data", cache_dir=model_args.cache_dir, token=model_args.token, ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets. # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) model = AutoModelForSeq2SeqLM.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) # We resize the embeddings only when necessary to avoid index errors. If you are creating a model from scratch # on a small vocab and want a smaller embedding size, remove this test. embedding_size = model.get_input_embeddings().weight.shape[0] if len(tokenizer) > embedding_size: model.resize_token_embeddings(len(tokenizer)) if model.config.decoder_start_token_id is None: raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined") # Preprocessing the datasets. # We need to generate and tokenize inputs and targets. if training_args.do_train: column_names = raw_datasets["train"].column_names elif training_args.do_eval: column_names = raw_datasets["validation"].column_names elif training_args.do_predict: column_names = raw_datasets["test"].column_names else: logger.info("There is nothing to do. Please pass `do_train`, `do_eval` and/or `do_predict`.") return # Get the column names for input/target. dataset_columns = question_answering_column_name_mapping.get(data_args.dataset_name, None) if data_args.question_column is None: question_column = dataset_columns[0] if dataset_columns is not None else column_names[0] else: question_column = data_args.question_column if question_column not in column_names: raise ValueError( f"--question_column' value '{data_args.question_column}' needs to be one of: {', '.join(column_names)}" ) if data_args.context_column is None: context_column = dataset_columns[1] if dataset_columns is not None else column_names[1] else: context_column = data_args.context_column if context_column not in column_names: raise ValueError( f"--context_column' value '{data_args.context_column}' needs to be one of: {', '.join(column_names)}" ) if data_args.answer_column is None: answer_column = dataset_columns[2] if dataset_columns is not None else column_names[2] else: answer_column = data_args.answer_column if answer_column not in column_names: raise ValueError( f"--answer_column' value '{data_args.answer_column}' needs to be one of: {', '.join(column_names)}" ) # Temporarily set max_answer_length for training. max_answer_length = data_args.max_answer_length padding = "max_length" if data_args.pad_to_max_length else False if training_args.label_smoothing_factor > 0 and not hasattr(model, "prepare_decoder_input_ids_from_labels"): logger.warning( "label_smoothing is enabled but the `prepare_decoder_input_ids_from_labels` method is not defined for " f"`{model.__class__.__name__}`. 
This will lead to loss being calculated twice and will take up more memory" ) if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the " f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." ) max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length) def preprocess_squad_batch( examples, question_column: str, context_column: str, answer_column: str, ) -> Tuple[List[str], List[str]]: questions = examples[question_column] contexts = examples[context_column] answers = examples[answer_column] def generate_input(_question, _context): return " ".join(["question:", _question.lstrip(), "context:", _context.lstrip()]) inputs = [generate_input(question, context) for question, context in zip(questions, contexts)] targets = [answer["text"][0] if len(answer["text"]) > 0 else "" for answer in answers] return inputs, targets def preprocess_function(examples): inputs, targets = preprocess_squad_batch(examples, question_column, context_column, answer_column) model_inputs = tokenizer(inputs, max_length=max_seq_length, padding=padding, truncation=True) # Tokenize targets with text_target=... labels = tokenizer(text_target=targets, max_length=max_answer_length, padding=padding, truncation=True) # If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore # padding in the loss. if padding == "max_length" and data_args.ignore_pad_token_for_loss: labels["input_ids"] = [ [(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"] ] model_inputs["labels"] = labels["input_ids"] return model_inputs # Validation preprocessing def preprocess_validation_function(examples): inputs, targets = preprocess_squad_batch(examples, question_column, context_column, answer_column) model_inputs = tokenizer( inputs, max_length=max_seq_length, padding=padding, truncation=True, return_overflowing_tokens=True, return_offsets_mapping=True, ) # Tokenize targets with the `text_target` keyword argument labels = tokenizer(text_target=targets, max_length=max_answer_length, padding=padding, truncation=True) # If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore # padding in the loss. if padding == "max_length" and data_args.ignore_pad_token_for_loss: labels["input_ids"] = [ [(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"] ] # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. sample_mapping = model_inputs.pop("overflow_to_sample_mapping") # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the # corresponding example_id and we will store the offset mappings. model_inputs["example_id"] = [] # Augment the overflowing tokens to the labels labels_out = [] for i in range(len(model_inputs["input_ids"])): # One example can give several spans, this is the index of the example containing this span of text. 
sample_index = sample_mapping[i] model_inputs["example_id"].append(examples["id"][sample_index]) labels_out.append(labels["input_ids"][sample_index]) model_inputs["labels"] = labels_out return model_inputs if training_args.do_train: if "train" not in raw_datasets: raise ValueError("--do_train requires a train dataset") train_dataset = raw_datasets["train"] if data_args.max_train_samples is not None: # We will select sample from whole data if argument is specified max_train_samples = min(len(train_dataset), data_args.max_train_samples) train_dataset = train_dataset.select(range(max_train_samples)) # Create train feature from dataset with training_args.main_process_first(desc="train dataset map pre-processing"): train_dataset = train_dataset.map( preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on train dataset", ) if data_args.max_train_samples is not None: # Number of samples might increase during Feature Creation, We select only specified max samples max_train_samples = min(len(train_dataset), data_args.max_train_samples) train_dataset = train_dataset.select(range(max_train_samples)) if training_args.do_eval: if "validation" not in raw_datasets: raise ValueError("--do_eval requires a validation dataset") eval_examples = raw_datasets["validation"] if data_args.max_eval_samples is not None: # We will select sample from whole data max_eval_samples = min(len(eval_examples), data_args.max_eval_samples) eval_examples = eval_examples.select(range(max_eval_samples)) # Validation Feature Creation with training_args.main_process_first(desc="validation dataset map pre-processing"): eval_dataset = eval_examples.map( preprocess_validation_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on validation dataset", ) if data_args.max_eval_samples is not None: # During Feature creation dataset samples might increase, we will select required samples again max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples) eval_dataset = eval_dataset.select(range(max_eval_samples)) if training_args.do_predict: if "test" not in raw_datasets: raise ValueError("--do_predict requires a test dataset") predict_examples = raw_datasets["test"] if data_args.max_predict_samples is not None: # We will select sample from whole data predict_examples = predict_examples.select(range(data_args.max_predict_samples)) # Predict Feature Creation with training_args.main_process_first(desc="prediction dataset map pre-processing"): predict_dataset = predict_examples.map( preprocess_validation_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on prediction dataset", ) if data_args.max_predict_samples is not None: # During Feature creation dataset samples might increase, we will select required samples again max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples) predict_dataset = predict_dataset.select(range(max_predict_samples)) # Data collator label_pad_token_id = -100 if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id data_collator = DataCollatorForSeq2Seq( tokenizer, model=model, label_pad_token_id=label_pad_token_id, pad_to_multiple_of=8 if training_args.fp16 else None, ) metric = evaluate.load( "squad_v2" 
if data_args.version_2_with_negative else "squad", cache_dir=model_args.cache_dir ) def compute_metrics(p: EvalPrediction): return metric.compute(predictions=p.predictions, references=p.label_ids) # Post-processing: def post_processing_function( examples: datasets.Dataset, features: datasets.Dataset, outputs: EvalLoopOutput, stage="eval" ): # Decode the predicted tokens. preds = outputs.predictions if isinstance(preds, tuple): preds = preds[0] # Replace -100s used for padding as we can't decode them preds = np.where(preds != -100, preds, tokenizer.pad_token_id) decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True) # Build a map example to its corresponding features. example_id_to_index = {k: i for i, k in enumerate(examples["id"])} feature_per_example = {example_id_to_index[feature["example_id"]]: i for i, feature in enumerate(features)} predictions = {} # Let's loop over all the examples! for example_index, example in enumerate(examples): # This is the index of the feature associated to the current example. feature_index = feature_per_example[example_index] predictions[example["id"]] = decoded_preds[feature_index] # Format the result to the format the metric expects. if data_args.version_2_with_negative: formatted_predictions = [ {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items() ] else: formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()] references = [{"id": ex["id"], "answers": ex[answer_column]} for ex in examples] return EvalPrediction(predictions=formatted_predictions, label_ids=references) # Initialize our Trainer trainer = QuestionAnsweringSeq2SeqTrainer( model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, eval_examples=eval_examples if training_args.do_eval else None, processing_class=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics if training_args.predict_with_generate else None, post_process_function=post_processing_function, ) # Training if training_args.do_train: checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint elif last_checkpoint is not None: checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() # Saves the tokenizer too for easy upload metrics = train_result.metrics max_train_samples = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset) ) metrics["train_samples"] = min(max_train_samples, len(train_dataset)) trainer.log_metrics("train", metrics) trainer.save_metrics("train", metrics) trainer.save_state() # Evaluation results = {} max_length = ( training_args.generation_max_length if training_args.generation_max_length is not None else data_args.val_max_answer_length ) num_beams = data_args.num_beams if data_args.num_beams is not None else training_args.generation_num_beams if training_args.do_eval: logger.info("*** Evaluate ***") metrics = trainer.evaluate(max_length=max_length, num_beams=num_beams, metric_key_prefix="eval") max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset) metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset)) trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) # Prediction if training_args.do_predict: logger.info("*** Predict ***") results = 
trainer.predict(predict_dataset, predict_examples) metrics = results.metrics max_predict_samples = ( data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset) ) metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset)) trainer.log_metrics("predict", metrics) trainer.save_metrics("predict", metrics) if training_args.push_to_hub: kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "question-answering"} if data_args.dataset_name is not None: kwargs["dataset_tags"] = data_args.dataset_name if data_args.dataset_config_name is not None: kwargs["dataset_args"] = data_args.dataset_config_name kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}" else: kwargs["dataset"] = data_args.dataset_name trainer.push_to_hub(**kwargs) def _mp_fn(index): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
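# ---------------------------------------------------------------------------
# Illustrative usage (not part of the original script). The dataset, model and
# output paths below are assumptions chosen only to show the shape of a
# typical invocation; adjust every flag to your own setup:
#
#   python run_seq2seq_qa.py \
#     --model_name_or_path google-t5/t5-small \
#     --dataset_name squad_v2 \
#     --version_2_with_negative \
#     --context_column context \
#     --question_column question \
#     --answer_column answers \
#     --do_train \
#     --do_eval \
#     --predict_with_generate \
#     --output_dir /tmp/tst-seq2seq-qa
# ---------------------------------------------------------------------------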
transformers/examples/pytorch/question-answering/run_seq2seq_qa.py/0
{ "file_path": "transformers/examples/pytorch/question-answering/run_seq2seq_qa.py", "repo_id": "transformers", "token_count": 13281 }
<!--- Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> ## Summarization This directory contains examples for finetuning and evaluating transformers on summarization tasks. Please tag @patil-suraj with any issues/unexpected behaviors, or send a PR! For deprecated `bertabs` instructions, see [`bertabs/README.md`](https://github.com/huggingface/transformers/blob/main/examples/research_projects/bertabs/README.md). For the old `finetune_trainer.py` and related utils, see [`examples/legacy/seq2seq`](https://github.com/huggingface/transformers/blob/main/examples/legacy/seq2seq). ### Supported Architectures - `BartForConditionalGeneration` - `FSMTForConditionalGeneration` (translation only) - `MBartForConditionalGeneration` - `MarianMTModel` - `PegasusForConditionalGeneration` - `T5ForConditionalGeneration` - `MT5ForConditionalGeneration` `run_summarization.py` is a lightweight example of how to download and preprocess a dataset from the [🤗 Datasets](https://github.com/huggingface/datasets) library or use your own files (jsonlines or csv), then fine-tune one of the architectures above on it. For custom datasets in `jsonlines` format please see: https://huggingface.co/docs/datasets/loading_datasets#json-files and you also will find examples of these below. ## With Trainer Here is an example on a summarization task: ```bash python examples/pytorch/summarization/run_summarization.py \ --model_name_or_path google-t5/t5-small \ --do_train \ --do_eval \ --dataset_name cnn_dailymail \ --dataset_config "3.0.0" \ --source_prefix "summarize: " \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ --overwrite_output_dir \ --predict_with_generate ``` Only T5 models `google-t5/t5-small`, `google-t5/t5-base`, `google-t5/t5-large`, `google-t5/t5-3b` and `google-t5/t5-11b` must use an additional argument: `--source_prefix "summarize: "`. We used CNN/DailyMail dataset in this example as `google-t5/t5-small` was trained on it and one can get good scores even when pre-training with a very small sample. Extreme Summarization (XSum) Dataset is another commonly used dataset for the task of summarization. To use it replace `--dataset_name cnn_dailymail --dataset_config "3.0.0"` with `--dataset_name xsum`. And here is how you would use it on your own files, after adjusting the values for the arguments `--train_file`, `--validation_file`, `--text_column` and `--summary_column` to match your setup: ```bash python examples/pytorch/summarization/run_summarization.py \ --model_name_or_path google-t5/t5-small \ --do_train \ --do_eval \ --train_file path_to_csv_or_jsonlines_file \ --validation_file path_to_csv_or_jsonlines_file \ --source_prefix "summarize: " \ --output_dir /tmp/tst-summarization \ --overwrite_output_dir \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ --predict_with_generate ``` The task of summarization supports custom CSV and JSONLINES formats. 
#### Custom CSV Files If it's a csv file the training and validation files should have a column for the inputs texts and a column for the summaries. If the csv file has just two columns as in the following example: ```csv text,summary "I'm sitting here in a boring room. It's just another rainy Sunday afternoon. I'm wasting my time I got nothing to do. I'm hanging around I'm waiting for you. But nothing ever happens. And I wonder","I'm sitting in a room where I'm waiting for something to happen" "I see trees so green, red roses too. I see them bloom for me and you. And I think to myself what a wonderful world. I see skies so blue and clouds so white. The bright blessed day, the dark sacred night. And I think to myself what a wonderful world.","I'm a gardener and I'm a big fan of flowers." "Christmas time is here. Happiness and cheer. Fun for all that children call. Their favorite time of the year. Snowflakes in the air. Carols everywhere. Olden times and ancient rhymes. Of love and dreams to share","It's that time of year again." ``` The first column is assumed to be for `text` and the second is for summary. If the csv file has multiple columns, you can then specify the names of the columns to use: ```bash --text_column text_column_name \ --summary_column summary_column_name \ ``` For example if the columns were: ```csv id,date,text,summary ``` and you wanted to select only `text` and `summary`, then you'd pass these additional arguments: ```bash --text_column text \ --summary_column summary \ ``` #### Custom JSONLINES Files The second supported format is jsonlines. Here is an example of a jsonlines custom data file. ```json {"text": "I'm sitting here in a boring room. It's just another rainy Sunday afternoon. I'm wasting my time I got nothing to do. I'm hanging around I'm waiting for you. But nothing ever happens. And I wonder", "summary": "I'm sitting in a room where I'm waiting for something to happen"} {"text": "I see trees so green, red roses too. I see them bloom for me and you. And I think to myself what a wonderful world. I see skies so blue and clouds so white. The bright blessed day, the dark sacred night. And I think to myself what a wonderful world.", "summary": "I'm a gardener and I'm a big fan of flowers."} {"text": "Christmas time is here. Happiness and cheer. Fun for all that children call. Their favorite time of the year. Snowflakes in the air. Carols everywhere. Olden times and ancient rhymes. Of love and dreams to share", "summary": "It's that time of year again."} ``` Same as with the CSV files, by default the first value will be used as the text record and the second as the summary record. Therefore you can use any key names for the entries, in this example `text` and `summary` were used. And as with the CSV files, you can specify which values to select from the file, by explicitly specifying the corresponding key names. In our example this again would be: ```bash --text_column text \ --summary_column summary \ ``` ## With Accelerate Based on the script [`run_summarization_no_trainer.py`](https://github.com/huggingface/transformers/blob/main/examples/pytorch/summarization/run_summarization_no_trainer.py). Like `run_summarization.py`, this script allows you to fine-tune any of the models supported on a summarization task, the main difference is that this script exposes the bare training loop, to allow you to quickly experiment and add any customization you would like. 
It offers fewer options than the script with `Trainer` (for instance, you can easily change the options for the optimizer or the dataloaders directly in the script), but it still runs in a distributed setup, on TPUs, and supports mixed precision by means of the [🤗 `Accelerate`](https://github.com/huggingface/accelerate) library. You can use the script normally after installing Accelerate:

```bash
pip install git+https://github.com/huggingface/accelerate
```

then

```bash
python run_summarization_no_trainer.py \
    --model_name_or_path google-t5/t5-small \
    --dataset_name cnn_dailymail \
    --dataset_config "3.0.0" \
    --source_prefix "summarize: " \
    --output_dir ~/tmp/tst-summarization
```

You can then use your usual launchers to run it in a distributed environment, but the easiest way is to run

```bash
accelerate config
```

and reply to the questions asked. Then

```bash
accelerate test
```

which will check that everything is ready for training. Finally, you can launch training with

```bash
accelerate launch run_summarization_no_trainer.py \
    --model_name_or_path google-t5/t5-small \
    --dataset_name cnn_dailymail \
    --dataset_config "3.0.0" \
    --source_prefix "summarize: " \
    --output_dir ~/tmp/tst-summarization
```

This command is the same and will work for:

- a CPU-only setup
- a setup with one GPU
- a distributed training with several GPUs (single or multi node)
- a training on TPUs

Note that this library is in alpha release, so your feedback is more than welcome if you encounter any problems using it.
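As a quick sanity check before launching a run on the custom CSV or JSONLINES files described above, you can load them with 🤗 Datasets and inspect the columns the script will see. This is only an illustrative sketch (not part of the example scripts); the file names below are placeholders for your own files.

```python
from datasets import load_dataset

# Placeholder paths; point these at your own training/validation files.
data_files = {"train": "train.json", "validation": "val.json"}

# Use "csv" instead of "json" if your files are CSV.
raw_datasets = load_dataset("json", data_files=data_files)

# The summarization script reads the text/summary columns
# (or the ones you pass via --text_column / --summary_column).
print(raw_datasets["train"].column_names)
print(raw_datasets["train"][0])
```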
transformers/examples/pytorch/summarization/README.md/0
{ "file_path": "transformers/examples/pytorch/summarization/README.md", "repo_id": "transformers", "token_count": 2636 }
<!---
Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->

# Token classification

## PyTorch version

Fine-tuning the library models for token classification tasks such as Named Entity Recognition (NER), Parts-of-speech tagging (POS) or phrase extraction (CHUNKS). The main script `run_ner.py` leverages the 🤗 Datasets library and the Trainer API. You can easily customize it to your needs if you need extra processing on your datasets.

It will run either on a dataset hosted on our [hub](https://huggingface.co/datasets) or with your own text files for training and validation; you might just need to add some tweaks in the data preprocessing.

### Using your own data

If you use your own data, the script expects each example in the following format:

```json
{
    "chunk_tags": [11, 12, 12, 21, 13, 11, 11, 21, 13, 11, 12, 13, 11, 21, 22, 11, 12, 17, 11, 21, 17, 11, 12, 12, 21, 22, 22, 13, 11, 0],
    "id": "0",
    "ner_tags": [0, 3, 4, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
    "pos_tags": [12, 22, 22, 38, 15, 22, 28, 38, 15, 16, 21, 35, 24, 35, 37, 16, 21, 15, 24, 41, 15, 16, 21, 21, 20, 37, 40, 35, 21, 7],
    "tokens": ["The", "European", "Commission", "said", "on", "Thursday", "it", "disagreed", "with", "German", "advice", "to", "consumers", "to", "shun", "British", "lamb", "until", "scientists", "determine", "whether", "mad", "cow", "disease", "can", "be", "transmitted", "to", "sheep", "."]
}
```

The following example fine-tunes BERT on CoNLL-2003:

```bash
python run_ner.py \
  --model_name_or_path google-bert/bert-base-uncased \
  --dataset_name conll2003 \
  --output_dir /tmp/test-ner \
  --do_train \
  --do_eval
```

or you can just run the bash script `run.sh`.

To run on your own training and validation files, use the following command:

```bash
python run_ner.py \
  --model_name_or_path google-bert/bert-base-uncased \
  --train_file path_to_train_file \
  --validation_file path_to_validation_file \
  --output_dir /tmp/test-ner \
  --do_train \
  --do_eval
```

**Note:** This script only works with models that have a fast tokenizer (backed by the 🤗 Tokenizers library) as it uses special features of those tokenizers. You can check if your favorite model has a fast tokenizer in [this table](https://huggingface.co/transformers/index.html#supported-frameworks); if it doesn't, you can still use the old version of the script.

> If your model classification head dimensions do not fit the number of labels in the dataset, you can specify `--ignore_mismatched_sizes` to adapt it.

## Old version of the script

You can find the old version of the PyTorch script [here](https://github.com/huggingface/transformers/blob/main/examples/legacy/token-classification/run_ner.py).

## PyTorch version, no Trainer

Based on the script [run_ner_no_trainer.py](https://github.com/huggingface/transformers/blob/main/examples/pytorch/token-classification/run_ner_no_trainer.py).
Like `run_ner.py`, this script allows you to fine-tune any of the models on the [hub](https://huggingface.co/models) on a token classification task (NER, POS or CHUNKS), or on your own data in a CSV or JSON file. The main difference is that this script exposes the bare training loop, to allow you to quickly experiment and add any customization you would like.

It offers fewer options than the script with `Trainer` (for instance, you can easily change the options for the optimizer or the dataloaders directly in the script), but it still runs in a distributed setup, on TPUs, and supports mixed precision by means of the [🤗 `Accelerate`](https://github.com/huggingface/accelerate) library. You can use the script normally after installing Accelerate:

```bash
pip install git+https://github.com/huggingface/accelerate
```

then

```bash
export TASK_NAME=ner

python run_ner_no_trainer.py \
  --model_name_or_path google-bert/bert-base-cased \
  --dataset_name conll2003 \
  --task_name $TASK_NAME \
  --max_length 128 \
  --per_device_train_batch_size 32 \
  --learning_rate 2e-5 \
  --num_train_epochs 3 \
  --output_dir /tmp/$TASK_NAME/
```

You can then use your usual launchers to run it in a distributed environment, but the easiest way is to run

```bash
accelerate config
```

and reply to the questions asked. Then

```bash
accelerate test
```

which will check that everything is ready for training. Finally, you can launch training with

```bash
export TASK_NAME=ner

accelerate launch run_ner_no_trainer.py \
  --model_name_or_path google-bert/bert-base-cased \
  --dataset_name conll2003 \
  --task_name $TASK_NAME \
  --max_length 128 \
  --per_device_train_batch_size 32 \
  --learning_rate 2e-5 \
  --num_train_epochs 3 \
  --output_dir /tmp/$TASK_NAME/
```

This command is the same and will work for:

- a CPU-only setup
- a setup with one GPU
- a distributed training with several GPUs (single or multi node)
- a training on TPUs

Note that this library is in alpha release, so your feedback is more than welcome if you encounter any problems using it.
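If you want to try the expected data format from the "Using your own data" section above before preparing a full corpus, the following sketch writes a tiny JSON-lines file that can be passed via `--train_file`. It is illustrative only (not part of the example scripts); the file name and tag ids are assumptions, so substitute the ids from your own label set.

```python
import json

# Two toy sentences in the same shape as the format shown above.
examples = [
    {"id": "0", "tokens": ["Hello", "Paris", "!"], "ner_tags": [0, 5, 0]},
    {"id": "1", "tokens": ["Alice", "works", "at", "ACME", "."], "ner_tags": [1, 0, 0, 3, 0]},
]

with open("train.json", "w", encoding="utf-8") as f:
    for example in examples:
        f.write(json.dumps(example) + "\n")
```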
transformers/examples/pytorch/token-classification/README.md/0
{ "file_path": "transformers/examples/pytorch/token-classification/README.md", "repo_id": "transformers", "token_count": 1813 }
# Patience-based Early Exit

Patience-based Early Exit (PABEE) is a plug-and-play inference method for pretrained language models.
We have already implemented it on BERT and ALBERT. Basically, you can make your LM faster and more robust with PABEE. It can even improve the performance of ALBERT on GLUE. The only sacrifice is that the inference batch size can only be 1.
Learn more in the paper ["BERT Loses Patience: Fast and Robust Inference with Early Exit"](https://arxiv.org/abs/2006.04152) and the official [GitHub repo](https://github.com/JetRunner/PABEE).

![PABEE](https://github.com/JetRunner/PABEE/raw/master/bert-loses-patience.png)

## Training

You can fine-tune a pretrained language model (you can choose from BERT and ALBERT) and train the internal classifiers by:

```bash
export GLUE_DIR=/path/to/glue_data
export TASK_NAME=MRPC

python ./run_glue_with_pabee.py \
  --model_type albert \
  --model_name_or_path albert/albert-base-v2 \
  --task_name $TASK_NAME \
  --do_train \
  --do_eval \
  --do_lower_case \
  --data_dir "$GLUE_DIR/$TASK_NAME" \
  --max_seq_length 128 \
  --per_gpu_train_batch_size 32 \
  --per_gpu_eval_batch_size 32 \
  --learning_rate 2e-5 \
  --save_steps 50 \
  --logging_steps 50 \
  --num_train_epochs 5 \
  --output_dir /path/to/save/ \
  --evaluate_during_training
```

(Use `--model_type bert` together with a BERT checkpoint such as `google-bert/bert-base-uncased` if you prefer BERT over ALBERT.)

## Inference

You can run inference with different patience settings by:

```bash
export GLUE_DIR=/path/to/glue_data
export TASK_NAME=MRPC

python ./run_glue_with_pabee.py \
  --model_type albert \
  --model_name_or_path /path/to/save/ \
  --task_name $TASK_NAME \
  --do_eval \
  --do_lower_case \
  --data_dir "$GLUE_DIR/$TASK_NAME" \
  --max_seq_length 128 \
  --per_gpu_eval_batch_size 1 \
  --learning_rate 2e-5 \
  --logging_steps 50 \
  --num_train_epochs 15 \
  --output_dir /path/to/save/ \
  --eval_all_checkpoints \
  --patience 3,4,5,6,7,8
```

where `patience` can be a list of patience settings, separated by a comma. It will help determine which patience works best.

When evaluating on a regression task (STS-B), you may add `--regression_threshold 0.1` to define the regression threshold.

## Results

On the GLUE dev set:

| Model | \#Param | Speed | CoLA | MNLI | MRPC | QNLI | QQP | RTE | SST\-2 | STS\-B |
|--------------|---------|--------|-------|-------|-------|-------|-------|-------|--------|--------|
| ALBERT\-base | 12M | | 58\.9 | 84\.6 | 89\.5 | 91\.7 | 89\.6 | 78\.6 | 92\.8 | 89\.5 |
| \+PABEE | 12M | 1\.57x | 61\.2 | 85\.1 | 90\.0 | 91\.8 | 89\.6 | 80\.1 | 93\.0 | 90\.1 |

| Model | \#Param | Speed\-up | MNLI | SST\-2 | STS\-B |
|---------------|---------|-----------|-------|--------|--------|
| BERT\-base | 108M | | 84\.5 | 92\.1 | 88\.9 |
| \+PABEE | 108M | 1\.62x | 83\.6 | 92\.0 | 88\.7 |
| ALBERT\-large | 18M | | 86\.4 | 94\.9 | 90\.4 |
| \+PABEE | 18M | 2\.42x | 86\.8 | 95\.2 | 90\.6 |

## Citation

If you find this resource useful, please consider citing the following paper:

```bibtex
@misc{zhou2020bert,
      title={BERT Loses Patience: Fast and Robust Inference with Early Exit},
      author={Wangchunshu Zhou and Canwen Xu and Tao Ge and Julian McAuley and Ke Xu and Furu Wei},
      year={2020},
      eprint={2006.04152},
      archivePrefix={arXiv},
      primaryClass={cs.CL}
}
```
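## Appendix: how the patience criterion works (illustrative)

The sketch below is a minimal, self-contained illustration of the patience rule described above, not the code used in `run_glue_with_pabee.py`: inference exits at the first layer whose internal classifier's prediction has stayed unchanged for `patience` consecutive layers; otherwise the last layer is used.

```python
from typing import List


def patience_based_exit(layer_predictions: List[int], patience: int) -> int:
    """Return the 0-based index of the layer where the model would exit."""
    unchanged = 0
    for i in range(1, len(layer_predictions)):
        unchanged = unchanged + 1 if layer_predictions[i] == layer_predictions[i - 1] else 0
        if unchanged >= patience:
            return i
    return len(layer_predictions) - 1


# Example: with patience=2, the prediction stabilizes by layer 3, so we exit there.
print(patience_based_exit([0, 1, 1, 1, 1, 1], patience=2))
```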
transformers/examples/research_projects/bert-loses-patience/README.md/0
{ "file_path": "transformers/examples/research_projects/bert-loses-patience/README.md", "repo_id": "transformers", "token_count": 1329 }
# coding=utf-8 # Copyright 2019-present, the HuggingFace Inc. team and Facebook, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """The distiller to distil the student. Adapted in part from Facebook, Inc XLM model (https://github.com/facebookresearch/XLM) """ import math import os import time import psutil import torch from grouped_batch_sampler import GroupedBatchSampler, create_lengths_groups from lm_seqs_dataset import LmSeqsDataset from torch import nn from torch.optim import AdamW from torch.utils.data import BatchSampler, DataLoader, RandomSampler from torch.utils.data.distributed import DistributedSampler from tqdm import tqdm from transformers import get_linear_schedule_with_warmup from utils import logger try: from torch.utils.tensorboard import SummaryWriter except ImportError: from tensorboardX import SummaryWriter class Distiller: def __init__( self, params: dict, dataset: LmSeqsDataset, token_probs: torch.tensor, student: nn.Module, teacher: nn.Module ): logger.info("Initializing Distiller") self.params = params self.dump_path = params.dump_path self.multi_gpu = params.multi_gpu self.fp16 = params.fp16 self.student = student self.teacher = teacher self.student_config = student.config self.vocab_size = student.config.vocab_size if params.n_gpu <= 1: sampler = RandomSampler(dataset) else: sampler = DistributedSampler(dataset) if params.group_by_size: groups = create_lengths_groups(lengths=dataset.lengths, k=params.max_model_input_size) sampler = GroupedBatchSampler(sampler=sampler, group_ids=groups, batch_size=params.batch_size) else: sampler = BatchSampler(sampler=sampler, batch_size=params.batch_size, drop_last=False) self.dataloader = DataLoader(dataset=dataset, batch_sampler=sampler, collate_fn=dataset.batch_sequences) self.temperature = params.temperature assert self.temperature > 0.0 self.alpha_ce = params.alpha_ce self.alpha_mlm = params.alpha_mlm self.alpha_clm = params.alpha_clm self.alpha_mse = params.alpha_mse self.alpha_cos = params.alpha_cos self.mlm = params.mlm if self.mlm: logger.info("Using MLM loss for LM step.") self.mlm_mask_prop = params.mlm_mask_prop assert 0.0 <= self.mlm_mask_prop <= 1.0 assert params.word_mask + params.word_keep + params.word_rand == 1.0 self.pred_probs = torch.FloatTensor([params.word_mask, params.word_keep, params.word_rand]) self.pred_probs = self.pred_probs.to(f"cuda:{params.local_rank}") if params.n_gpu > 0 else self.pred_probs self.token_probs = token_probs.to(f"cuda:{params.local_rank}") if params.n_gpu > 0 else token_probs if self.fp16: self.pred_probs = self.pred_probs.half() self.token_probs = self.token_probs.half() else: logger.info("Using CLM loss for LM step.") self.epoch = 0 self.n_iter = 0 self.n_total_iter = 0 self.n_sequences_epoch = 0 self.total_loss_epoch = 0 self.last_loss = 0 self.last_loss_ce = 0 self.last_loss_mlm = 0 self.last_loss_clm = 0 if self.alpha_mse > 0.0: self.last_loss_mse = 0 if self.alpha_cos > 0.0: self.last_loss_cos = 0 self.last_log = 0 self.ce_loss_fct = nn.KLDivLoss(reduction="batchmean") self.lm_loss_fct = 
nn.CrossEntropyLoss(ignore_index=-100) if self.alpha_mse > 0.0: self.mse_loss_fct = nn.MSELoss(reduction="sum") if self.alpha_cos > 0.0: self.cosine_loss_fct = nn.CosineEmbeddingLoss(reduction="mean") logger.info("--- Initializing model optimizer") assert params.gradient_accumulation_steps >= 1 self.num_steps_epoch = len(self.dataloader) num_train_optimization_steps = ( int(self.num_steps_epoch / params.gradient_accumulation_steps * params.n_epoch) + 1 ) no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [ p for n, p in student.named_parameters() if not any(nd in n for nd in no_decay) and p.requires_grad ], "weight_decay": params.weight_decay, }, { "params": [ p for n, p in student.named_parameters() if any(nd in n for nd in no_decay) and p.requires_grad ], "weight_decay": 0.0, }, ] logger.info( "------ Number of trainable parameters (student): %i" % sum([p.numel() for p in self.student.parameters() if p.requires_grad]) ) logger.info("------ Number of parameters (student): %i" % sum([p.numel() for p in self.student.parameters()])) self.optimizer = AdamW( optimizer_grouped_parameters, lr=params.learning_rate, eps=params.adam_epsilon, betas=(0.9, 0.98) ) warmup_steps = math.ceil(num_train_optimization_steps * params.warmup_prop) self.scheduler = get_linear_schedule_with_warmup( self.optimizer, num_warmup_steps=warmup_steps, num_training_steps=num_train_optimization_steps ) if self.fp16: try: from apex import amp except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") logger.info(f"Using fp16 training: {self.params.fp16_opt_level} level") self.student, self.optimizer = amp.initialize( self.student, self.optimizer, opt_level=self.params.fp16_opt_level ) self.teacher = self.teacher.half() if self.multi_gpu: if self.fp16: from apex.parallel import DistributedDataParallel logger.info("Using apex.parallel.DistributedDataParallel for distributed training.") self.student = DistributedDataParallel(self.student) else: from torch.nn.parallel import DistributedDataParallel logger.info("Using nn.parallel.DistributedDataParallel for distributed training.") self.student = DistributedDataParallel( self.student, device_ids=[params.local_rank], output_device=params.local_rank, find_unused_parameters=True, ) self.is_master = params.is_master if self.is_master: logger.info("--- Initializing Tensorboard") self.tensorboard = SummaryWriter(log_dir=os.path.join(self.dump_path, "log", "train")) self.tensorboard.add_text(tag="config/training", text_string=str(self.params), global_step=0) self.tensorboard.add_text(tag="config/student", text_string=str(self.student_config), global_step=0) def prepare_batch_mlm(self, batch): """ Prepare the batch: from the token_ids and the lengths, compute the attention mask and the masked label for MLM. Input: ------ batch: `Tuple` token_ids: `torch.tensor(bs, seq_length)` - The token ids for each of the sequence. It is padded. lengths: `torch.tensor(bs)` - The lengths of each of the sequences in the batch. Output: ------- token_ids: `torch.tensor(bs, seq_length)` - The token ids after the modifications for MLM. attn_mask: `torch.tensor(bs, seq_length)` - The attention mask for the self-attention. mlm_labels: `torch.tensor(bs, seq_length)` - The masked language modeling labels. There is a -100 where there is nothing to predict. 
""" token_ids, lengths = batch token_ids, lengths = self.round_batch(x=token_ids, lengths=lengths) assert token_ids.size(0) == lengths.size(0) attn_mask = torch.arange(token_ids.size(1), dtype=torch.long, device=lengths.device) < lengths[:, None] bs, max_seq_len = token_ids.size() mlm_labels = token_ids.new(token_ids.size()).copy_(token_ids) x_prob = self.token_probs[token_ids.flatten()] n_tgt = math.ceil(self.mlm_mask_prop * lengths.sum().item()) tgt_ids = torch.multinomial(x_prob / x_prob.sum(), n_tgt, replacement=False) pred_mask = torch.zeros( bs * max_seq_len, dtype=torch.bool, device=token_ids.device ) # previously `dtype=torch.uint8`, cf pytorch 1.2.0 compatibility pred_mask[tgt_ids] = 1 pred_mask = pred_mask.view(bs, max_seq_len) pred_mask[token_ids == self.params.special_tok_ids["pad_token"]] = 0 # mask a number of words == 0 [8] (faster with fp16) if self.fp16: n1 = pred_mask.sum().item() if n1 > 8: pred_mask = pred_mask.view(-1) n2 = max(n1 % 8, 8 * (n1 // 8)) if n2 != n1: pred_mask[torch.nonzero(pred_mask).view(-1)[: n1 - n2]] = 0 pred_mask = pred_mask.view(bs, max_seq_len) assert pred_mask.sum().item() % 8 == 0, pred_mask.sum().item() _token_ids_real = token_ids[pred_mask] _token_ids_rand = _token_ids_real.clone().random_(self.vocab_size) _token_ids_mask = _token_ids_real.clone().fill_(self.params.special_tok_ids["mask_token"]) probs = torch.multinomial(self.pred_probs, len(_token_ids_real), replacement=True) _token_ids = ( _token_ids_mask * (probs == 0).long() + _token_ids_real * (probs == 1).long() + _token_ids_rand * (probs == 2).long() ) token_ids = token_ids.masked_scatter(pred_mask, _token_ids) mlm_labels[~pred_mask] = -100 # previously `mlm_labels[1-pred_mask] = -1`, cf pytorch 1.2.0 compatibility # sanity checks assert 0 <= token_ids.min() <= token_ids.max() < self.vocab_size return token_ids, attn_mask, mlm_labels def prepare_batch_clm(self, batch): """ Prepare the batch: from the token_ids and the lengths, compute the attention mask and the labels for CLM. Input: ------ batch: `Tuple` token_ids: `torch.tensor(bs, seq_length)` - The token ids for each of the sequence. It is padded. lengths: `torch.tensor(bs)` - The lengths of each of the sequences in the batch. Output: ------- token_ids: `torch.tensor(bs, seq_length)` - The token ids after the modifications for MLM. attn_mask: `torch.tensor(bs, seq_length)` - The attention mask for the self-attention. clm_labels: `torch.tensor(bs, seq_length)` - The causal language modeling labels. There is a -100 where there is nothing to predict. """ token_ids, lengths = batch token_ids, lengths = self.round_batch(x=token_ids, lengths=lengths) assert token_ids.size(0) == lengths.size(0) attn_mask = torch.arange(token_ids.size(1), dtype=torch.long, device=lengths.device) < lengths[:, None] clm_labels = token_ids.new(token_ids.size()).copy_(token_ids) clm_labels[~attn_mask] = -100 # previously `clm_labels[1-attn_mask] = -1`, cf pytorch 1.2.0 compatibility # sanity checks assert 0 <= token_ids.min() <= token_ids.max() < self.vocab_size return token_ids, attn_mask, clm_labels def round_batch(self, x: torch.tensor, lengths: torch.tensor): """ For float16 only. Sub-sample sentences in a batch, and add padding, so that each dimension is a multiple of 8. Input: ------ x: `torch.tensor(bs, seq_length)` - The token ids. lengths: `torch.tensor(bs, seq_length)` - The lengths of each of the sequence in the batch. Output: ------- x: `torch.tensor(new_bs, new_seq_length)` - The updated token ids. 
lengths: `torch.tensor(new_bs, new_seq_length)` - The updated lengths. """ if not self.fp16 or len(lengths) < 8: return x, lengths # number of sentences == 0 [8] bs1 = len(lengths) bs2 = 8 * (bs1 // 8) assert bs2 > 0 and bs2 % 8 == 0 if bs1 != bs2: idx = torch.randperm(bs1)[:bs2] lengths = lengths[idx] slen = lengths.max().item() x = x[idx, :slen] else: idx = None # sequence length == 0 [8] ml1 = x.size(1) if ml1 % 8 != 0: pad = 8 - (ml1 % 8) ml2 = ml1 + pad if self.mlm: pad_id = self.params.special_tok_ids["pad_token"] else: pad_id = self.params.special_tok_ids["unk_token"] padding_tensor = torch.zeros(bs2, pad, dtype=torch.long, device=x.device).fill_(pad_id) x = torch.cat([x, padding_tensor], 1) assert x.size() == (bs2, ml2) assert x.size(0) % 8 == 0 assert x.size(1) % 8 == 0 return x, lengths def train(self): """ The real training loop. """ if self.is_master: logger.info("Starting training") self.last_log = time.time() self.student.train() self.teacher.eval() for _ in range(self.params.n_epoch): if self.is_master: logger.info(f"--- Starting epoch {self.epoch}/{self.params.n_epoch-1}") if self.multi_gpu: torch.distributed.barrier() iter_bar = tqdm(self.dataloader, desc="-Iter", disable=self.params.local_rank not in [-1, 0]) for batch in iter_bar: if self.params.n_gpu > 0: batch = tuple(t.to(f"cuda:{self.params.local_rank}") for t in batch) if self.mlm: token_ids, attn_mask, lm_labels = self.prepare_batch_mlm(batch=batch) else: token_ids, attn_mask, lm_labels = self.prepare_batch_clm(batch=batch) self.step(input_ids=token_ids, attention_mask=attn_mask, lm_labels=lm_labels) iter_bar.update() iter_bar.set_postfix( {"Last_loss": f"{self.last_loss:.2f}", "Avg_cum_loss": f"{self.total_loss_epoch/self.n_iter:.2f}"} ) iter_bar.close() if self.is_master: logger.info(f"--- Ending epoch {self.epoch}/{self.params.n_epoch-1}") self.end_epoch() if self.is_master: logger.info("Save very last checkpoint as `pytorch_model.bin`.") self.save_checkpoint(checkpoint_name="pytorch_model.bin") logger.info("Training is finished") def step(self, input_ids: torch.tensor, attention_mask: torch.tensor, lm_labels: torch.tensor): """ One optimization step: forward of student AND teacher, backward on the loss (for gradient accumulation), and possibly a parameter update (depending on the gradient accumulation). Input: ------ input_ids: `torch.tensor(bs, seq_length)` - The token ids. attention_mask: `torch.tensor(bs, seq_length)` - The attention mask for self attention. lm_labels: `torch.tensor(bs, seq_length)` - The language modeling labels (mlm labels for MLM and clm labels for CLM). 
""" if self.mlm: student_outputs = self.student( input_ids=input_ids, attention_mask=attention_mask ) # (bs, seq_length, voc_size) with torch.no_grad(): teacher_outputs = self.teacher( input_ids=input_ids, attention_mask=attention_mask ) # (bs, seq_length, voc_size) else: student_outputs = self.student(input_ids=input_ids, attention_mask=None) # (bs, seq_length, voc_size) with torch.no_grad(): teacher_outputs = self.teacher(input_ids=input_ids, attention_mask=None) # (bs, seq_length, voc_size) s_logits, s_hidden_states = student_outputs["logits"], student_outputs["hidden_states"] t_logits, t_hidden_states = teacher_outputs["logits"], teacher_outputs["hidden_states"] assert s_logits.size() == t_logits.size() # https://github.com/peterliht/knowledge-distillation-pytorch/blob/master/model/net.py#L100 # https://github.com/peterliht/knowledge-distillation-pytorch/issues/2 if self.params.restrict_ce_to_mask: mask = (lm_labels > -1).unsqueeze(-1).expand_as(s_logits) # (bs, seq_length, voc_size) else: mask = attention_mask.unsqueeze(-1).expand_as(s_logits) # (bs, seq_length, voc_size) s_logits_slct = torch.masked_select(s_logits, mask) # (bs * seq_length * voc_size) modulo the 1s in mask s_logits_slct = s_logits_slct.view(-1, s_logits.size(-1)) # (bs * seq_length, voc_size) modulo the 1s in mask t_logits_slct = torch.masked_select(t_logits, mask) # (bs * seq_length * voc_size) modulo the 1s in mask t_logits_slct = t_logits_slct.view(-1, s_logits.size(-1)) # (bs * seq_length, voc_size) modulo the 1s in mask assert t_logits_slct.size() == s_logits_slct.size() loss_ce = ( self.ce_loss_fct( nn.functional.log_softmax(s_logits_slct / self.temperature, dim=-1), nn.functional.softmax(t_logits_slct / self.temperature, dim=-1), ) * (self.temperature) ** 2 ) loss = self.alpha_ce * loss_ce if self.alpha_mlm > 0.0: loss_mlm = self.lm_loss_fct(s_logits.view(-1, s_logits.size(-1)), lm_labels.view(-1)) loss += self.alpha_mlm * loss_mlm if self.alpha_clm > 0.0: shift_logits = s_logits[..., :-1, :].contiguous() shift_labels = lm_labels[..., 1:].contiguous() loss_clm = self.lm_loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) loss += self.alpha_clm * loss_clm if self.alpha_mse > 0.0: loss_mse = self.mse_loss_fct(s_logits_slct, t_logits_slct) / s_logits_slct.size( 0 ) # Reproducing batchmean reduction loss += self.alpha_mse * loss_mse if self.alpha_cos > 0.0: s_hidden_states = s_hidden_states[-1] # (bs, seq_length, dim) t_hidden_states = t_hidden_states[-1] # (bs, seq_length, dim) mask = attention_mask.unsqueeze(-1).expand_as(s_hidden_states) # (bs, seq_length, dim) assert s_hidden_states.size() == t_hidden_states.size() dim = s_hidden_states.size(-1) s_hidden_states_slct = torch.masked_select(s_hidden_states, mask) # (bs * seq_length * dim) s_hidden_states_slct = s_hidden_states_slct.view(-1, dim) # (bs * seq_length, dim) t_hidden_states_slct = torch.masked_select(t_hidden_states, mask) # (bs * seq_length * dim) t_hidden_states_slct = t_hidden_states_slct.view(-1, dim) # (bs * seq_length, dim) target = s_hidden_states_slct.new(s_hidden_states_slct.size(0)).fill_(1) # (bs * seq_length,) loss_cos = self.cosine_loss_fct(s_hidden_states_slct, t_hidden_states_slct, target) loss += self.alpha_cos * loss_cos self.total_loss_epoch += loss.item() self.last_loss = loss.item() self.last_loss_ce = loss_ce.item() if self.alpha_mlm > 0.0: self.last_loss_mlm = loss_mlm.item() if self.alpha_clm > 0.0: self.last_loss_clm = loss_clm.item() if self.alpha_mse > 0.0: self.last_loss_mse = loss_mse.item() if 
self.alpha_cos > 0.0: self.last_loss_cos = loss_cos.item() self.optimize(loss) self.n_sequences_epoch += input_ids.size(0) def optimize(self, loss): """ Normalization on the loss (gradient accumulation or distributed training), followed by backward pass on the loss, possibly followed by a parameter update (depending on the gradient accumulation). Also update the metrics for tensorboard. """ # Check for NaN if (loss != loss).data.any(): logger.error("NaN detected") exit() if self.multi_gpu: loss = loss.mean() if self.params.gradient_accumulation_steps > 1: loss = loss / self.params.gradient_accumulation_steps if self.fp16: from apex import amp with amp.scale_loss(loss, self.optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() self.iter() if self.n_iter % self.params.gradient_accumulation_steps == 0: if self.fp16: nn.utils.clip_grad_norm_(amp.master_params(self.optimizer), self.params.max_grad_norm) else: nn.utils.clip_grad_norm_(self.student.parameters(), self.params.max_grad_norm) self.optimizer.step() self.optimizer.zero_grad() self.scheduler.step() def iter(self): """ Update global counts, write to tensorboard and save checkpoint. """ self.n_iter += 1 self.n_total_iter += 1 if self.n_total_iter % self.params.log_interval == 0: self.log_tensorboard() self.last_log = time.time() if self.n_total_iter % self.params.checkpoint_interval == 0: self.save_checkpoint() def log_tensorboard(self): """ Log into tensorboard. Only by the master process. """ if not self.is_master: return for param_name, param in self.student.named_parameters(): self.tensorboard.add_scalar( tag="parameter_mean/" + param_name, scalar_value=param.data.mean(), global_step=self.n_total_iter ) self.tensorboard.add_scalar( tag="parameter_std/" + param_name, scalar_value=param.data.std(), global_step=self.n_total_iter ) if param.grad is None: continue self.tensorboard.add_scalar( tag="grad_mean/" + param_name, scalar_value=param.grad.data.mean(), global_step=self.n_total_iter ) self.tensorboard.add_scalar( tag="grad_std/" + param_name, scalar_value=param.grad.data.std(), global_step=self.n_total_iter ) self.tensorboard.add_scalar( tag="losses/cum_avg_loss_epoch", scalar_value=self.total_loss_epoch / self.n_iter, global_step=self.n_total_iter, ) self.tensorboard.add_scalar(tag="losses/loss", scalar_value=self.last_loss, global_step=self.n_total_iter) self.tensorboard.add_scalar( tag="losses/loss_ce", scalar_value=self.last_loss_ce, global_step=self.n_total_iter ) if self.alpha_mlm > 0.0: self.tensorboard.add_scalar( tag="losses/loss_mlm", scalar_value=self.last_loss_mlm, global_step=self.n_total_iter ) if self.alpha_clm > 0.0: self.tensorboard.add_scalar( tag="losses/loss_clm", scalar_value=self.last_loss_clm, global_step=self.n_total_iter ) if self.alpha_mse > 0.0: self.tensorboard.add_scalar( tag="losses/loss_mse", scalar_value=self.last_loss_mse, global_step=self.n_total_iter ) if self.alpha_cos > 0.0: self.tensorboard.add_scalar( tag="losses/loss_cos", scalar_value=self.last_loss_cos, global_step=self.n_total_iter ) self.tensorboard.add_scalar( tag="learning_rate/lr", scalar_value=self.scheduler.get_lr()[0], global_step=self.n_total_iter ) self.tensorboard.add_scalar( tag="global/memory_usage", scalar_value=psutil.virtual_memory()._asdict()["used"] / 1_000_000, global_step=self.n_total_iter, ) self.tensorboard.add_scalar( tag="global/speed", scalar_value=time.time() - self.last_log, global_step=self.n_total_iter ) def end_epoch(self): """ Finally arrived at the end of epoch (full pass on dataset). 
Do some tensorboard logging and checkpoint saving. """ logger.info(f"{self.n_sequences_epoch} sequences have been trained during this epoch.") if self.is_master: self.save_checkpoint(checkpoint_name=f"model_epoch_{self.epoch}.pth") self.tensorboard.add_scalar( tag="epoch/loss", scalar_value=self.total_loss_epoch / self.n_iter, global_step=self.epoch ) self.epoch += 1 self.n_sequences_epoch = 0 self.n_iter = 0 self.total_loss_epoch = 0 def save_checkpoint(self, checkpoint_name: str = "checkpoint.pth"): """ Save the current state. Only by the master process. """ if not self.is_master: return mdl_to_save = self.student.module if hasattr(self.student, "module") else self.student mdl_to_save.config.save_pretrained(self.dump_path) state_dict = mdl_to_save.state_dict() torch.save(state_dict, os.path.join(self.dump_path, checkpoint_name))
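# ---------------------------------------------------------------------------
# Illustrative helper (not used anywhere above): a toy, self-contained
# reproduction of the soft-target distillation term computed in
# `Distiller.step`, i.e. the KL divergence between temperature-softened
# student and teacher distributions, scaled by temperature**2. The tensor
# shapes and the temperature value are arbitrary assumptions.
# ---------------------------------------------------------------------------
def _illustrate_soft_target_loss(temperature: float = 2.0):
    import torch
    from torch import nn

    torch.manual_seed(0)
    s_logits = torch.randn(4, 10)  # student logits: (n_selected_tokens, vocab_size)
    t_logits = torch.randn(4, 10)  # teacher logits: (n_selected_tokens, vocab_size)
    ce_loss_fct = nn.KLDivLoss(reduction="batchmean")
    loss_ce = ce_loss_fct(
        nn.functional.log_softmax(s_logits / temperature, dim=-1),
        nn.functional.softmax(t_logits / temperature, dim=-1),
    ) * (temperature**2)
    return loss_ce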
transformers/examples/research_projects/distillation/distiller.py/0
{ "file_path": "transformers/examples/research_projects/distillation/distiller.py", "repo_id": "transformers", "token_count": 12492 }
<p align="center"> <img src="http://sayef.tech:8082/uploads/FSNER-LOGO-2.png" alt="FSNER LOGO"> </p> <p align="center"> Implemented by <a href="https://huggingface.co/sayef"> sayef </a>. </p> ## Overview The FSNER model was proposed in [Example-Based Named Entity Recognition](https://arxiv.org/abs/2008.10570) by Morteza Ziyadi, Yuting Sun, Abhishek Goswami, Jade Huang, Weizhu Chen. To identify entity spans in a new domain, it uses a train-free few-shot learning approach inspired by question-answering. ## Abstract ---- > We present a novel approach to named entity recognition (NER) in the presence of scarce data that we call example-based NER. Our train-free few-shot learning approach takes inspiration from question-answering to identify entity spans in a new and unseen domain. In comparison with the current state-of-the-art, the proposed method performs significantly better, especially when using a low number of support examples. ## Model Training Details ----- | identifier | epochs | datasets | | ---------- |:----------:| :-----:| | [sayef/fsner-bert-base-uncased](https://huggingface.co/sayef/fsner-bert-base-uncased) | 10 | ontonotes5, conll2003, wnut2017, and fin (Alvarado et al.). | ## Installation and Example Usage ------ You can use the FSNER model in 3 ways: 1. Install directly from PyPI: `pip install fsner` and import the model as shown in the code example below or 2. Install from source: `python setup.py install` and import the model as shown in the code example below or 3. Clone repo and change directory to `src` and import the model as shown in the code example below ```python from fsner import FSNERModel, FSNERTokenizerUtils model = FSNERModel("sayef/fsner-bert-base-uncased") tokenizer = FSNERTokenizerUtils("sayef/fsner-bert-base-uncased") # size of query and supports must be the same. If you want to find all the entitites in one particular query, just repeat the same query n times where n is equal to the number of supports (or entities). query = [ 'KWE 4000 can reach with a maximum speed from up to 450 P/min an accuracy from 50 mg', 'I would like to order a computer from eBay.', ] # each list in supports are the examples of one entity type # wrap entities around with [E] and [/E] in the examples supports = [ [ 'Horizontal flow wrapper [E] Pack 403 [/E] features the new retrofit-kit „paper-ON-form“', '[E] Paloma Pick-and-Place-Roboter [/E] arranges the bakery products for the downstream tray-forming equipment', 'Finally, the new [E] Kliklok ACE [/E] carton former forms cartons and trays without the use of glue', 'We set up our pilot plant with the right [E] FibreForm® [/E] configuration to make prototypes for your marketing tests and package validation', 'The [E] CAR-T5 [/E] is a reliable, purely mechanically driven cartoning machine for versatile application fields' ], [ "[E] Walmart [/E] is a leading e-commerce company", "I recently ordered a book from [E] Amazon [/E]", "I ordered this from [E] ShopClues [/E]", "[E] Flipkart [/E] started it's journey from zero" ] ] device = 'cpu' W_query = tokenizer.tokenize(query).to(device) W_supports = tokenizer.tokenize(supports).to(device) start_prob, end_prob = model(W_query, W_supports) output = tokenizer.extract_entity_from_scores(query, W_query, start_prob, end_prob, thresh=0.50) print(output) ```
transformers/examples/research_projects/fsner/README.md/0
{ "file_path": "transformers/examples/research_projects/fsner/README.md", "repo_id": "transformers", "token_count": 1174 }
import json import os from dataclasses import dataclass from functools import partial from typing import Callable import flax.linen as nn import jax import jax.numpy as jnp import joblib import optax import wandb from flax import jax_utils, struct, traverse_util from flax.serialization import from_bytes, to_bytes from flax.training import train_state from flax.training.common_utils import shard from tqdm.auto import tqdm from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule): """ BigBirdForQuestionAnswering with CLS Head over the top for predicting category This way we can load its weights with FlaxBigBirdForQuestionAnswering """ config: BigBirdConfig dtype: jnp.dtype = jnp.float32 add_pooling_layer: bool = True def setup(self): super().setup() self.cls = nn.Dense(5, dtype=self.dtype) def __call__(self, *args, **kwargs): outputs = super().__call__(*args, **kwargs) cls_out = self.cls(outputs[2]) return outputs[:2] + (cls_out,) class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering): module_class = FlaxBigBirdForNaturalQuestionsModule def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels): def cross_entropy(logits, labels, reduction=None): """ Args: logits: bsz, seqlen, vocab_size labels: bsz, seqlen """ vocab_size = logits.shape[-1] labels = (labels[..., None] == jnp.arange(vocab_size)[None]).astype("f4") logits = jax.nn.log_softmax(logits, axis=-1) loss = -jnp.sum(labels * logits, axis=-1) if reduction is not None: loss = reduction(loss) return loss cross_entropy = partial(cross_entropy, reduction=jnp.mean) start_loss = cross_entropy(start_logits, start_labels) end_loss = cross_entropy(end_logits, end_labels) pooled_loss = cross_entropy(pooled_logits, pooler_labels) return (start_loss + end_loss + pooled_loss) / 3 @dataclass class Args: model_id: str = "google/bigbird-roberta-base" logging_steps: int = 3000 save_steps: int = 10500 block_size: int = 128 num_random_blocks: int = 3 batch_size_per_device: int = 1 max_epochs: int = 5 # tx_args lr: float = 3e-5 init_lr: float = 0.0 warmup_steps: int = 20000 weight_decay: float = 0.0095 save_dir: str = "bigbird-roberta-natural-questions" base_dir: str = "training-expt" tr_data_path: str = "data/nq-training.jsonl" val_data_path: str = "data/nq-validation.jsonl" def __post_init__(self): os.makedirs(self.base_dir, exist_ok=True) self.save_dir = os.path.join(self.base_dir, self.save_dir) self.batch_size = self.batch_size_per_device * jax.device_count() @dataclass class DataCollator: pad_id: int max_length: int = 4096 # no dynamic padding on TPUs def __call__(self, batch): batch = self.collate_fn(batch) batch = jax.tree_util.tree_map(shard, batch) return batch def collate_fn(self, features): input_ids, attention_mask = self.fetch_inputs(features["input_ids"]) batch = { "input_ids": jnp.array(input_ids, dtype=jnp.int32), "attention_mask": jnp.array(attention_mask, dtype=jnp.int32), "start_labels": jnp.array(features["start_token"], dtype=jnp.int32), "end_labels": jnp.array(features["end_token"], dtype=jnp.int32), "pooled_labels": jnp.array(features["category"], dtype=jnp.int32), } return batch def fetch_inputs(self, input_ids: list): inputs = [self._fetch_inputs(ids) for ids in input_ids] return zip(*inputs) def _fetch_inputs(self, input_ids: list): attention_mask = [1 for _ in 
range(len(input_ids))] while len(input_ids) < self.max_length: input_ids.append(self.pad_id) attention_mask.append(0) return input_ids, attention_mask def get_batched_dataset(dataset, batch_size, seed=None): if seed is not None: dataset = dataset.shuffle(seed=seed) for i in range(len(dataset) // batch_size): batch = dataset[i * batch_size : (i + 1) * batch_size] yield dict(batch) @partial(jax.pmap, axis_name="batch") def train_step(state, drp_rng, **model_inputs): def loss_fn(params): start_labels = model_inputs.pop("start_labels") end_labels = model_inputs.pop("end_labels") pooled_labels = model_inputs.pop("pooled_labels") outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True) start_logits, end_logits, pooled_logits = outputs return state.loss_fn( start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels, ) drp_rng, new_drp_rng = jax.random.split(drp_rng) grad_fn = jax.value_and_grad(loss_fn) loss, grads = grad_fn(state.params) metrics = jax.lax.pmean({"loss": loss}, axis_name="batch") grads = jax.lax.pmean(grads, "batch") state = state.apply_gradients(grads=grads) return state, metrics, new_drp_rng @partial(jax.pmap, axis_name="batch") def val_step(state, **model_inputs): start_labels = model_inputs.pop("start_labels") end_labels = model_inputs.pop("end_labels") pooled_labels = model_inputs.pop("pooled_labels") outputs = state.apply_fn(**model_inputs, params=state.params, train=False) start_logits, end_logits, pooled_logits = outputs loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels) metrics = jax.lax.pmean({"loss": loss}, axis_name="batch") return metrics class TrainState(train_state.TrainState): loss_fn: Callable = struct.field(pytree_node=False) @dataclass class Trainer: args: Args data_collator: Callable train_step_fn: Callable val_step_fn: Callable model_save_fn: Callable logger: wandb scheduler_fn: Callable = None def create_state(self, model, tx, num_train_steps, ckpt_dir=None): params = model.params state = TrainState.create( apply_fn=model.__call__, params=params, tx=tx, loss_fn=calculate_loss_for_nq, ) if ckpt_dir is not None: params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state) tx_args = { "lr": args.lr, "init_lr": args.init_lr, "warmup_steps": args.warmup_steps, "num_train_steps": num_train_steps, "weight_decay": args.weight_decay, } tx, lr = build_tx(**tx_args) state = train_state.TrainState( step=step, apply_fn=model.__call__, params=params, tx=tx, opt_state=opt_state, ) self.args = args self.data_collator = data_collator self.scheduler_fn = lr model.params = params state = jax_utils.replicate(state) return state def train(self, state, tr_dataset, val_dataset): args = self.args total = len(tr_dataset) // args.batch_size rng = jax.random.PRNGKey(0) drp_rng = jax.random.split(rng, jax.device_count()) for epoch in range(args.max_epochs): running_loss = jnp.array(0, dtype=jnp.float32) tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch) i = 0 for batch in tqdm(tr_dataloader, total=total, desc=f"Running EPOCH-{epoch}"): batch = self.data_collator(batch) state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch) running_loss += jax_utils.unreplicate(metrics["loss"]) i += 1 if i % args.logging_steps == 0: state_step = jax_utils.unreplicate(state.step) tr_loss = running_loss.item() / i lr = self.scheduler_fn(state_step - 1) eval_loss = self.evaluate(state, val_dataset) logging_dict = { "step": 
state_step.item(), "eval_loss": eval_loss.item(), "tr_loss": tr_loss, "lr": lr.item(), } tqdm.write(str(logging_dict)) self.logger.log(logging_dict, commit=True) if i % args.save_steps == 0: self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=state) def evaluate(self, state, dataset): dataloader = get_batched_dataset(dataset, self.args.batch_size) total = len(dataset) // self.args.batch_size running_loss = jnp.array(0, dtype=jnp.float32) i = 0 for batch in tqdm(dataloader, total=total, desc="Evaluating ... "): batch = self.data_collator(batch) metrics = self.val_step_fn(state, **batch) running_loss += jax_utils.unreplicate(metrics["loss"]) i += 1 return running_loss / i def save_checkpoint(self, save_dir, state): state = jax_utils.unreplicate(state) print(f"SAVING CHECKPOINT IN {save_dir}", end=" ... ") self.model_save_fn(save_dir, params=state.params) with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f: f.write(to_bytes(state.opt_state)) joblib.dump(self.args, os.path.join(save_dir, "args.joblib")) joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib")) with open(os.path.join(save_dir, "training_state.json"), "w") as f: json.dump({"step": state.step.item()}, f) print("DONE") def restore_checkpoint(save_dir, state): print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ") with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f: params = from_bytes(state.params, f.read()) with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f: opt_state = from_bytes(state.opt_state, f.read()) args = joblib.load(os.path.join(save_dir, "args.joblib")) data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib")) with open(os.path.join(save_dir, "training_state.json"), "r") as f: training_state = json.load(f) step = training_state["step"] print("DONE") return params, opt_state, step, args, data_collator def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps): decay_steps = num_train_steps - warmup_steps warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps) decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps) lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps]) return lr def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay): def weight_decay_mask(params): params = traverse_util.flatten_dict(params) mask = {k: (v[-1] != "bias" and v[-2:] != ("LayerNorm", "scale")) for k, v in params.items()} return traverse_util.unflatten_dict(mask) lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps) tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask) return tx, lr
transformers/examples/research_projects/jax-projects/big_bird/bigbird_flax.py/0
{ "file_path": "transformers/examples/research_projects/jax-projects/big_bird/bigbird_flax.py", "repo_id": "transformers", "token_count": 5480 }
# Wav2Vec2 Contrastive Loss PreTraining examples

The following example showcases how to pretrain a wav2vec2 model using the JAX/Flax backend.
Pretraining Wav2Vec2 is rather complex, so it is highly recommended to read the
[official paper](https://arxiv.org/abs/2006.11477).

JAX/Flax allows you to trace pure functions and compile them into efficient, fused accelerator code on both GPU and TPU.
Models written in JAX/Flax are **immutable** and updated in a purely functional way which enables simple and efficient model parallelism.

`run_wav2vec2_pretrain_flax.py` is a lightweight example of how to download and preprocess a dataset from the 🤗 Datasets library or use your own files (jsonlines or csv), then pretrain the wav2vec2 architectures above on it.

For custom datasets in `jsonlines` format, please see [the Datasets documentation](https://huggingface.co/docs/datasets/loading_datasets#json-files); you will also find examples of these below.

Let's start by creating a model repository to save the trained model and logs.
Here we call the model `"wav2vec2-base-robust"`, but you can change the model name as you like.

You can do this either directly on [huggingface.co](https://huggingface.co/new) (assuming that you are logged in) or via the command line:

```bash
huggingface-cli repo create wav2vec2-base-robust
```

Next, we clone the model repository to add the tokenizer and model files.

```bash
git clone https://huggingface.co/<your-username>/wav2vec2-base-robust
```

To ensure that all tensorboard traces will be uploaded correctly, we need to track them. You can run the following command inside your model repo to do so.

```bash
cd wav2vec2-base-robust
git lfs track "*tfevents*"
```

Great, we have set up our model repository. During training, we will automatically push the training logs and model weights to the repo.

Next, let's add a symbolic link to the `run_wav2vec2_pretrain_flax.py` script.

```bash
export MODEL_DIR="./wav2vec2-base-robust"
ln -s ~/transformers/examples/research_projects/jax-projects/wav2vec2/run_wav2vec2_pretrain_flax.py ./
```

### Create the model configuration

Let's first create the model configuration and store it in the model repository.
Note that many training parameters can be set in the model configuration, including the configuration of the masking distribution (`mask_time_length`, `mask_time_prob`), dropout (`attention_dropout`, ...), and the trade-off between the contrastive loss and the diversity loss.
Most likely you will need to change these parameters depending on your use case.
Again, we highly recommend reading the [official paper](https://arxiv.org/abs/2006.11477) to better understand which parameters can be set for pretraining.

For this example, we will be using a `"base"`-sized model of Wav2Vec2 with robust layer norm and keep most of the default settings.

```python
model_dir="./wav2vec2-base-robust"

from transformers import Wav2Vec2Config
config = Wav2Vec2Config.from_pretrained(
    "facebook/wav2vec2-base",
    mask_time_length=10,
    mask_time_prob=0.05,
    diversity_loss_weight=0.1,
    num_negatives=100,
    do_stable_layer_norm=True,
    feat_extract_norm="layer",
)
config.save_pretrained(model_dir)
```

### Create a feature extractor configuration

Before we can start the training, we need to define a feature extractor that takes care of normalization, etc.
Here we can also re-use the feature extractor of [wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base) while making sure that padding is allowed.
```python
model_dir="./wav2vec2-base-robust"

from transformers import Wav2Vec2FeatureExtractor
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("facebook/wav2vec2-base", return_attention_mask=True)
feature_extractor.save_pretrained(model_dir)
```

### Train the model

Finally, we can run the example script to train the model:

```bash
./run_wav2vec2_pretrain_flax.py \
    --output_dir=${MODEL_DIR} \
    --num_train_epochs="5" \
    --per_device_train_batch_size="32" \
    --per_device_eval_batch_size="32" \
    --learning_rate="5e-4" \
    --weight_decay="0.01" \
    --warmup_steps="2000" \
    --model_name_or_path=${MODEL_DIR} \
    --dataset_name="librispeech_asr" \
    --dataset_config_name="clean" \
    --train_split_name="train.100" \
    --preprocessing_num_workers="4" \
    --max_duration_in_seconds="10.0" \
    --adam_beta1="0.9" \
    --adam_beta2="0.98" \
    --pad_to_multiple_of="16384" \
    --push_to_hub
```

Note that this script is not fully tested yet, so we cannot ensure that running it leads to satisfying results.
transformers/examples/research_projects/jax-projects/wav2vec2/README.md/0
{ "file_path": "transformers/examples/research_projects/jax-projects/wav2vec2/README.md", "repo_id": "transformers", "token_count": 1507 }
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Masked BERT model configuration. It replicates the class `~transformers.BertConfig` and adapts it to the specificities of MaskedBert (`pruning_method`, `mask_init` and `mask_scale`.""" import logging from transformers.configuration_utils import PretrainedConfig logger = logging.getLogger(__name__) class MaskedBertConfig(PretrainedConfig): """ A class replicating the `~transformers.BertConfig` with additional parameters for pruning/masking configuration. """ model_type = "masked_bert" def __init__( self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, pruning_method="topK", mask_init="constant", mask_scale=0.0, **kwargs, ): super().__init__(pad_token_id=pad_token_id, **kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.pruning_method = pruning_method self.mask_init = mask_init self.mask_scale = mask_scale
transformers/examples/research_projects/movement-pruning/emmental/configuration_bert_masked.py/0
{ "file_path": "transformers/examples/research_projects/movement-pruning/emmental/configuration_bert_masked.py", "repo_id": "transformers", "token_count": 995 }
# coding=utf-8 # Copyright 2020 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ IMPORTANT: This code was copied from https://github.com/google-research/google-research/blob/master/performer/fast_self_attention/fast_self_attention.py on 6/11/2020. This is very new code, so it might be prone to change soon -> make sure to check the original code and update accordingly Core Fast Attention Module for Flax. Implementation of the approximate fast softmax and generalized attention mechanism leveraging structured random feature maps [RFM] techniques and low rank decomposition of the attention matrix. """ # pylint: disable=invalid-name, missing-function-docstring, line-too-long import abc import functools from collections.abc import Iterable # pylint: disable=g-importing-member import jax import jax.numpy as jnp import numpy as onp from absl import logging from jax import lax, random def nonnegative_softmax_kernel_feature_creator( data, projection_matrix, attention_dims_t, batch_dims_t, precision, is_query, normalize_data=True, eps=0.0001 ): """ Constructs nonnegative kernel features for fast softmax attention Args: data: input for which features are computes projection_matrix: random matrix used to compute features attention_dims_t: tuple of attention dimensions batch_dims_t: tuple of batch dimensions precision: precision parameter is_query: predicate indicating whether input data corresponds to queries or keys normalize_data: predicate indicating whether data should be normalized, eps: numerical stabilizer Returns: Random features for fast softmax attention. """ del attention_dims_t if normalize_data: # We have e^{qk^T/sqrt{d}} = e^{q_norm k_norm^T}, where # w_norm = w * data_normalizer for w in {q,k}. 
data_normalizer = 1.0 / (jnp.sqrt(jnp.sqrt(data.shape[-1]))) else: data_normalizer = 1.0 ratio = 1.0 / jnp.sqrt(projection_matrix.shape[0]) data_mod_shape = data.shape[0 : len(batch_dims_t)] + projection_matrix.shape data_thick_random_matrix = jnp.zeros(data_mod_shape) + projection_matrix data_dash = lax.dot_general( data_normalizer * data, data_thick_random_matrix, (((data.ndim - 1,), (data_thick_random_matrix.ndim - 1,)), (batch_dims_t, batch_dims_t)), precision=precision, ) diag_data = jnp.square(data) diag_data = jnp.sum(diag_data, axis=data.ndim - 1) diag_data = (diag_data / 2.0) * data_normalizer * data_normalizer diag_data = jnp.expand_dims(diag_data, axis=data.ndim - 1) if is_query: last_dims_t = (len(data_dash.shape) - 1,) data_dash = ratio * ( jnp.exp(data_dash - diag_data - jnp.max(data_dash, axis=last_dims_t, keepdims=True)) + eps ) else: data_dash = ratio * (jnp.exp(data_dash - diag_data - jnp.max(data_dash)) + eps) return data_dash def sincos_softmax_kernel_feature_creator( data, projection_matrix, attention_dims_t, batch_dims_t, precision, normalize_data=True ): """ Constructs kernel sin-cos features for fast softmax attention Args: data: input for which features are computes projection_matrix: random matrix used to compute features attention_dims_t: tuple of attention dimensions batch_dims_t: tuple of batch dimensions precision: precision parameter normalize_data: predicate indicating whether data should be normalized Returns: Random features for fast softmax attention. """ if normalize_data: # We have: exp(qk^T/sqrt{d}) = exp(|q|^2/2sqrt{d}) * exp(|k|^2/2sqrt{d}) * # exp(-(|q*c-k*c|^2)/2), where c = 1.0 / sqrt{sqrt{d}}. data_normalizer = 1.0 / (jnp.sqrt(jnp.sqrt(data.shape[-1]))) else: data_normalizer = 1.0 ratio = 1.0 / jnp.sqrt(projection_matrix.shape[0]) data_mod_shape = data.shape[0 : len(batch_dims_t)] + projection_matrix.shape data_thick_random_matrix = jnp.zeros(data_mod_shape) + projection_matrix data_dash = lax.dot_general( data_normalizer * data, data_thick_random_matrix, (((data.ndim - 1,), (data_thick_random_matrix.ndim - 1,)), (batch_dims_t, batch_dims_t)), precision=precision, ) data_dash_cos = ratio * jnp.cos(data_dash) data_dash_sin = ratio * jnp.sin(data_dash) data_dash = jnp.concatenate((data_dash_cos, data_dash_sin), axis=-1) # Constructing D_data and data^{'} diag_data = jnp.square(data) diag_data = jnp.sum(diag_data, axis=data.ndim - 1) diag_data = (diag_data / 2.0) * data_normalizer * data_normalizer diag_data = jnp.expand_dims(diag_data, axis=data.ndim - 1) # Additional renormalization for numerical stability data_renormalizer = jnp.max(diag_data, attention_dims_t, keepdims=True) diag_data -= data_renormalizer diag_data = jnp.exp(diag_data) data_prime = data_dash * diag_data return data_prime def generalized_kernel_feature_creator( data, projection_matrix, batch_dims_t, precision, kernel_fn, kernel_epsilon, normalize_data ): """ Constructs kernel features for fast generalized attention Args: data: input for which features are computes projection_matrix: matrix used to compute features batch_dims_t: tuple of batch dimensions precision: precision parameter kernel_fn: kernel function used kernel_epsilon: additive positive term added to every feature for numerical stability normalize_data: predicate indicating whether data should be normalized Returns: Random features for fast generalized attention. 
""" if normalize_data: data_normalizer = 1.0 / (jnp.sqrt(jnp.sqrt(data.shape[-1]))) else: data_normalizer = 1.0 if projection_matrix is None: return kernel_fn(data_normalizer * data) + kernel_epsilon else: data_mod_shape = data.shape[0 : len(batch_dims_t)] + projection_matrix.shape data_thick_random_matrix = jnp.zeros(data_mod_shape) + projection_matrix data_dash = lax.dot_general( data_normalizer * data, data_thick_random_matrix, (((data.ndim - 1,), (data_thick_random_matrix.ndim - 1,)), (batch_dims_t, batch_dims_t)), precision=precision, ) data_prime = kernel_fn(data_dash) + kernel_epsilon return data_prime def make_fast_softmax_attention( qkv_dim, renormalize_attention=True, numerical_stabilizer=0.000001, nb_features=256, ortho_features=True, ortho_scaling=0.0, redraw_features=True, unidirectional=False, nonnegative_features=True, lax_scan_unroll=1, ): """Construct a fast softmax attention method.""" logging.info( "Fast softmax attention: %s features and orthogonal=%s, renormalize=%s", nb_features, ortho_features, renormalize_attention, ) if ortho_features: matrix_creator = functools.partial(GaussianOrthogonalRandomMatrix, nb_features, qkv_dim, scaling=ortho_scaling) else: matrix_creator = functools.partial(GaussianUnstructuredRandomMatrix, nb_features, qkv_dim) if nonnegative_features: def kernel_feature_creator( data, projection_matrix, attention_dims_t, batch_dims_t, precision, is_query, normalize_data=True ): return nonnegative_softmax_kernel_feature_creator( data, projection_matrix, attention_dims_t, batch_dims_t, precision, is_query, normalize_data, numerical_stabilizer, ) else: def kernel_feature_creator( data, projection_matrix, attention_dims_t, batch_dims_t, precision, is_query, normalize_data=True ): del is_query return sincos_softmax_kernel_feature_creator( data, projection_matrix, attention_dims_t, batch_dims_t, precision, normalize_data ) attention_fn = FastAttentionviaLowRankDecomposition( matrix_creator, kernel_feature_creator, renormalize_attention=renormalize_attention, numerical_stabilizer=numerical_stabilizer, redraw_features=redraw_features, unidirectional=unidirectional, lax_scan_unroll=lax_scan_unroll, ).dot_product_attention return attention_fn def make_fast_generalized_attention( qkv_dim, renormalize_attention=True, numerical_stabilizer=0.0, nb_features=256, features_type="deterministic", kernel_fn=jax.nn.relu, kernel_epsilon=0.001, redraw_features=False, unidirectional=False, lax_scan_unroll=1, ): """Construct a fast generalized attention menthod.""" logging.info("Fast generalized attention.: %s features and renormalize=%s", nb_features, renormalize_attention) if features_type == "ortho": matrix_creator = functools.partial(GaussianOrthogonalRandomMatrix, nb_features, qkv_dim, scaling=False) elif features_type == "iid": matrix_creator = functools.partial(GaussianUnstructuredRandomMatrix, nb_features, qkv_dim) elif features_type == "deterministic": matrix_creator = None else: raise ValueError("Unknown feature value type") def kernel_feature_creator( data, projection_matrix, attention_dims_t, batch_dims_t, precision, is_query, normalize_data=False ): del attention_dims_t del is_query return generalized_kernel_feature_creator( data, projection_matrix, batch_dims_t, precision, kernel_fn, kernel_epsilon, normalize_data ) attention_fn = FastAttentionviaLowRankDecomposition( matrix_creator, kernel_feature_creator, renormalize_attention=renormalize_attention, numerical_stabilizer=numerical_stabilizer, redraw_features=redraw_features, unidirectional=unidirectional, 
lax_scan_unroll=lax_scan_unroll, ).dot_product_attention return attention_fn class RandomMatrix: r""" Abstract class providing a method for constructing 2D random arrays. Class is responsible for constructing 2D random arrays. """ __metaclass__ = abc.ABCMeta @abc.abstractmethod def get_2d_array(self): raise NotImplementedError("Abstract method") class GaussianUnstructuredRandomMatrix(RandomMatrix): def __init__(self, nb_rows, nb_columns, key): self.nb_rows = nb_rows self.nb_columns = nb_columns self.key = key def get_2d_array(self): return random.normal(self.key, (self.nb_rows, self.nb_columns)) class GaussianOrthogonalRandomMatrix(RandomMatrix): r""" Class providing a method to create Gaussian orthogonal matrix. Class is responsible for constructing 2D Gaussian orthogonal arrays. """ def __init__(self, nb_rows, nb_columns, key, scaling=0): self.nb_rows = nb_rows self.nb_columns = nb_columns self.key = key self.scaling = scaling def get_2d_array(self): nb_full_blocks = int(self.nb_rows / self.nb_columns) block_list = [] rng = self.key for _ in range(nb_full_blocks): rng, rng_input = jax.random.split(rng) unstructured_block = random.normal(rng_input, (self.nb_columns, self.nb_columns)) q, _ = jnp.linalg.qr(unstructured_block) q = jnp.transpose(q) block_list.append(q) remaining_rows = self.nb_rows - nb_full_blocks * self.nb_columns if remaining_rows > 0: rng, rng_input = jax.random.split(rng) unstructured_block = random.normal(rng_input, (self.nb_columns, self.nb_columns)) q, _ = jnp.linalg.qr(unstructured_block) q = jnp.transpose(q) block_list.append(q[0:remaining_rows]) final_matrix = jnp.vstack(block_list) if self.scaling == 0: multiplier = jnp.linalg.norm(random.normal(self.key, (self.nb_rows, self.nb_columns)), axis=1) elif self.scaling == 1: multiplier = jnp.sqrt(float(self.nb_columns)) * jnp.ones((self.nb_rows)) else: raise ValueError("Scaling must be one of {0, 1}. Was %s" % self._scaling) return jnp.matmul(jnp.diag(multiplier), final_matrix) class FastAttention: r""" Abstract class providing a method for fast attention. Class is responsible for providing a method <dot_product_attention> for fast approximate attention. """ __metaclass__ = abc.ABCMeta @abc.abstractmethod def dot_product_attention( self, query, key, value, dtype=jnp.float32, bias=None, axis=None, broadcast_dropout=True, dropout_rng=None, dropout_rate=0.0, deterministic=False, precision=None, ): """ Computes dot-product attention given query, key, and value. This is the core function for applying fast approximate dot-product attention. It calculates the attention weights given query and key and combines the values using the attention weights. This function supports multi-dimensional inputs Args: query: queries for calculating attention with shape of [batch_size, dim1, dim2, ..., dimN, num_heads, mem_channels]. key: keys for calculating attention with shape of [batch_size, dim1, dim2, ..., dimN, num_heads, mem_channels]. value: values to be used in attention with shape of [batch_size, dim1, dim2,..., dimN, num_heads, value_channels]. dtype: the dtype of the computation (default: float32) bias: bias for the attention weights. This can be used for incorporating autoregressive mask, padding mask, proximity bias. axis: axises over which the attention is applied. broadcast_dropout: bool: use a broadcasted dropout along batch dims. dropout_rng: JAX PRNGKey: to be used for dropout. dropout_rate: dropout rate. deterministic: bool, deterministic or not (to apply dropout). 
precision: numerical precision of the computation see `jax.lax.Precision` for details Returns: Output of shape [bs, dim1, dim2, ..., dimN,, num_heads, value_channels]. """ raise NotImplementedError("Abstract method") def _numerator(z_slice_shape, precision, unroll=1): def fwd(qs, ks, vs): def body(p, qkv): (q, k, v) = qkv p += jnp.einsum("...m,...d->...md", k, v, precision=precision) X_slice = jnp.einsum("...m,...md->...d", q, p, precision=precision) return p, X_slice init_value = jnp.zeros(z_slice_shape) p, W = lax.scan(body, init_value, (qs, ks, vs), unroll=unroll) return W, (p, qs, ks, vs) def bwd(pqkv, W_ct): def body(carry, qkv_xct): p, p_ct = carry q, k, v, x_ct = qkv_xct q_ct = jnp.einsum("...d,...md->...m", x_ct, p, precision=precision) p_ct += jnp.einsum("...d,...m->...md", x_ct, q, precision=precision) k_ct = jnp.einsum("...md,...d->...m", p_ct, v, precision=precision) v_ct = jnp.einsum("...md,...m->...d", p_ct, k, precision=precision) p -= jnp.einsum("...m,...d->...md", k, v, precision=precision) return (p, p_ct), (q_ct, k_ct, v_ct) p, qs, ks, vs = pqkv _, (qs_ct, ks_ct, vs_ct) = lax.scan( body, (p, jnp.zeros_like(p)), (qs, ks, vs, W_ct), reverse=True, unroll=unroll ) return qs_ct, ks_ct, vs_ct @jax.custom_vjp def _numerator_impl(qs, ks, vs): W, _ = fwd(qs, ks, vs) return W _numerator_impl.defvjp(fwd, bwd) return _numerator_impl def _denominator(t_slice_shape, precision, unroll=1): def fwd(qs, ks): def body(p, qk): q, k = qk p += k x = jnp.einsum("...m,...m->...", q, p, precision=precision) return p, x p = jnp.zeros(t_slice_shape) p, R = lax.scan(body, p, (qs, ks), unroll=unroll) return R, (qs, ks, p) def bwd(qkp, R_ct): def body(carry, qkx): p, p_ct = carry q, k, x_ct = qkx q_ct = jnp.einsum("...,...m->...m", x_ct, p, precision=precision) p_ct += jnp.einsum("...,...m->...m", x_ct, q, precision=precision) k_ct = p_ct p -= k return (p, p_ct), (q_ct, k_ct) qs, ks, p = qkp _, (qs_ct, ks_ct) = lax.scan(body, (p, jnp.zeros_like(p)), (qs, ks, R_ct), reverse=True, unroll=unroll) return (qs_ct, ks_ct) @jax.custom_vjp def _denominator_impl(qs, ks): R, _ = fwd(qs, ks) return R _denominator_impl.defvjp(fwd, bwd) return _denominator_impl class FastAttentionviaLowRankDecomposition(FastAttention): r""" Class providing a method for fast attention via low rank decomposition. Class is responsible for providing a method <dot_product_attention> for fast dot-product attention with the use of low rank decomposition (e.g. with random feature maps). """ def __init__( self, matrix_creator, kernel_feature_creator, renormalize_attention, numerical_stabilizer, redraw_features, unidirectional, lax_scan_unroll=1, ): # For optimal GPU performance, set to 16. 
rng = random.PRNGKey(0) self.matrix_creator = matrix_creator self.projection_matrix = self.draw_weights(rng) self.kernel_feature_creator = kernel_feature_creator self.renormalize_attention = renormalize_attention self.numerical_stabilizer = numerical_stabilizer self.redraw_features = redraw_features self.unidirectional = unidirectional self.lax_scan_unroll = lax_scan_unroll def draw_weights(self, key): if self.matrix_creator is None: return None matrixrng, _ = random.split(key) projection_matrix = self.matrix_creator(key=matrixrng).get_2d_array() return projection_matrix def dot_product_attention( self, query, key, value, dtype=jnp.float32, bias=None, axis=None, broadcast_dropout=True, dropout_rng=None, dropout_rate=0.0, deterministic=False, precision=None, ): assert key.shape[:-1] == value.shape[:-1] assert query.shape[0:1] == key.shape[0:1] and query.shape[-1] == key.shape[-1] if axis is None: axis = tuple(range(1, key.ndim - 2)) if not isinstance(axis, Iterable): axis = (axis,) assert key.ndim == query.ndim assert key.ndim == value.ndim for ax in axis: if not (query.ndim >= 3 and 1 <= ax < query.ndim - 2): raise ValueError("Attention axis must be between the batch axis and the last-two axes.") n = key.ndim # Constructing projection tensor. if self.redraw_features: # TODO(kchoro): Get rid of the constant below. query_seed = lax.convert_element_type(jnp.ceil(jnp.sum(query) * 10000000.0), jnp.int32) rng = random.PRNGKey(query_seed) self.projection_matrix = self.draw_weights(rng) # batch_dims is <bs, <non-attention dims>, num_heads> batch_dims = tuple(onp.delete(range(n), axis + (n - 1,))) # q & k -> (bs, <non-attention dims>, num_heads, <attention dims>, channels) qk_perm = batch_dims + axis + (n - 1,) k_extra_perm = axis + batch_dims + (n - 1,) key_extra = key.transpose(k_extra_perm) key = key.transpose(qk_perm) query = query.transpose(qk_perm) # v -> (bs, <non-attention dims>, num_heads, <attention dims>, channels) v_perm = batch_dims + axis + (n - 1,) value = value.transpose(v_perm) batch_dims_t = tuple(range(len(batch_dims))) attention_dims_t = tuple(range(len(batch_dims), len(batch_dims) + len(axis))) # Constructing tensors Q^{'} and K^{'}. query_prime = self.kernel_feature_creator( query, self.projection_matrix, attention_dims_t, batch_dims_t, precision, True ) key_prime = self.kernel_feature_creator( key, self.projection_matrix, attention_dims_t, batch_dims_t, precision, False ) if self.unidirectional: index = attention_dims_t[0] z_slice_shape = key_prime.shape[0 : len(batch_dims_t)] + (key_prime.shape[-1],) + (value.shape[-1],) numerator_fn = _numerator(z_slice_shape, precision, self.lax_scan_unroll) W = numerator_fn( jnp.moveaxis(query_prime, index, 0), jnp.moveaxis(key_prime, index, 0), jnp.moveaxis(value, index, 0) ) # Constructing W = (Q^{'}(K^{'})^{T})_{masked}V W = jnp.moveaxis(W, 0, index) if not self.renormalize_attention: # Unidirectional, not-normalized attention. perm_inv = _invert_perm(qk_perm) result = W.transpose(perm_inv) return result else: # Unidirectional, normalized attention. 
thick_all_ones = jnp.zeros(key.shape[0:-1]) + jnp.ones(key_extra.shape[0 : len(axis)]) index = attention_dims_t[0] t_slice_shape = key_prime.shape[0 : len(batch_dims_t)] + (key_prime.shape[-1],) denominator_fn = _denominator(t_slice_shape, precision, self.lax_scan_unroll) R = denominator_fn(jnp.moveaxis(query_prime, index, 0), jnp.moveaxis(key_prime, index, 0)) R = jnp.moveaxis(R, 0, index) else: contract_query = tuple(range(len(batch_dims) + len(axis), len(batch_dims) + len(axis) + 1)) contract_z = tuple(range(len(batch_dims), len(batch_dims) + 1)) # Constructing Z = (K^{'})^{T}V # Z (bs, <non-attention dims>, num_heads, channels_m, channels_v) Z = lax.dot_general( key_prime, value, ((attention_dims_t, attention_dims_t), (batch_dims_t, batch_dims_t)), precision=precision, ) # Constructing W = Q^{'}Z = Q^{'}(K^{'})^{T}V # q (bs, <non-attention dims>, num_heads, <attention dims>, channels_m) # Z (bs, <non-attention dims>, num_heads, channels_m, channels_v) # W (bs, <non-attention dims>, num_heads, <attention dims>, channels_v) W = lax.dot_general( query_prime, Z, ((contract_query, contract_z), (batch_dims_t, batch_dims_t)), precision=precision ) if not self.renormalize_attention: # Bidirectional, not-normalized attention. perm_inv = _invert_perm(qk_perm) result = W.transpose(perm_inv) return result else: # Bidirectional, normalized attention. thick_all_ones = jnp.zeros(key.shape[0:-1]) + jnp.ones(key_extra.shape[0 : len(axis)]) contract_key = tuple(range(len(batch_dims), len(batch_dims) + len(axis))) contract_thick_all_ones = tuple(range(thick_all_ones.ndim - len(axis), thick_all_ones.ndim)) # Construct T = (K^{'})^{T} 1_L # k (bs, <non-attention dims>, num_heads, <attention dims>, channels) T = lax.dot_general( key_prime, thick_all_ones, ((contract_key, contract_thick_all_ones), (batch_dims_t, batch_dims_t)), precision=precision, ) # Construct partition function: R = Q^{'} T = Q^{'}(K^{'})^{T} 1_L # q_p (bs, <non-attention dims>, num_heads, <attention dims>, channs_m) # T (bs, <non-attention dims>, num_heads, channels_m) R = lax.dot_general( query_prime, T, (((query_prime.ndim - 1,), (T.ndim - 1,)), (batch_dims_t, range(0, len(T.shape) - 1))), precision=precision, ) R = R + 2 * self.numerical_stabilizer * (jnp.abs(R) <= self.numerical_stabilizer) R = jnp.reciprocal(R) R = jnp.expand_dims(R, len(R.shape)) # W (bs, <non-attention dims>, num_heads, <attention dims>, channels_v) # R (bs, <non-attention dims>, num_heads, <attention dims>, extra_channel) result = W * R # back to (bs, dim1, dim2, ..., dimN, num_heads, channels) perm_inv = _invert_perm(qk_perm) result = result.transpose(perm_inv) return result def _invert_perm(perm): perm_inv = [0] * len(perm) for i, j in enumerate(perm): perm_inv[j] = i return tuple(perm_inv)
transformers/examples/research_projects/performer/modeling_flax_performer_utils.py/0
{ "file_path": "transformers/examples/research_projects/performer/modeling_flax_performer_utils.py", "repo_id": "transformers", "token_count": 11667 }
# coding=utf-8 # Copyright 2020 The HuggingFace Team All rights reserved. # Copyright 2021 NVIDIA Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ A subclass of `Trainer` specific to Question-Answering tasks """ import logging import os import quant_trainer import torch from torch.utils.data import DataLoader from transformers import Trainer, is_torch_xla_available from transformers.trainer_utils import PredictionOutput logger = logging.getLogger(__name__) if is_torch_xla_available(): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class QuestionAnsweringTrainer(Trainer): def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs): super().__init__(*args, **kwargs) self.eval_examples = eval_examples self.post_process_function = post_process_function self.quant_trainer_args = quant_trainer_args self.calib_num = 128 # default number of calibration samples def get_calib_dataloader(self, calib_dataset=None): """ Returns the calibration dataloader :class:`~torch.utils.data.DataLoader`. Args: calib_dataset (:obj:`torch.utils.data.Dataset`, `optional`) """ if calib_dataset is None and self.calib_dataset is None: raise ValueError("Trainer: calibration requires an calib_dataset.") calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset calib_dataset = self._remove_unused_columns(calib_dataset, description="Calibration") return DataLoader( calib_dataset, batch_size=self.args.eval_batch_size, collate_fn=self.data_collator, drop_last=self.args.dataloader_drop_last, num_workers=self.args.dataloader_num_workers, pin_memory=self.args.dataloader_pin_memory, shuffle=True, ) def calibrate(self, calib_dataset=None): calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset calib_dataloader = self.get_calib_dataloader(calib_dataset) model = self.model quant_trainer.configure_model(model, self.quant_trainer_args, calib=True) model.eval() quant_trainer.enable_calibration(model) logger.info("***** Running calibration *****") logger.info(f" Num examples = {self.calib_num}") logger.info(f" Batch size = {calib_dataloader.batch_size}") for step, inputs in enumerate(calib_dataloader): # Prediction step loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only=True) if (step + 1) * calib_dataloader.batch_size >= self.calib_num: break quant_trainer.finish_calibration(model, self.quant_trainer_args) self.model = model def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"): eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset eval_dataloader = self.get_eval_dataloader(eval_dataset) eval_examples = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. 
compute_metrics = self.compute_metrics self.compute_metrics = None eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: output = eval_loop( eval_dataloader, description="Evaluation", # No point gathering the predictions if there are no metrics, otherwise we defer to # self.args.prediction_loss_only prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, ) finally: self.compute_metrics = compute_metrics if self.post_process_function is not None and self.compute_metrics is not None: eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions) metrics = self.compute_metrics(eval_preds) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys()): if not key.startswith(f"{metric_key_prefix}_"): metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key) self.log(metrics) else: metrics = {} if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report()) self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics) return metrics def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"): predict_dataloader = self.get_test_dataloader(predict_dataset) # Temporarily disable metric computation, we will do it in the loop here. compute_metrics = self.compute_metrics self.compute_metrics = None eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: output = eval_loop( predict_dataloader, description="Prediction", # No point gathering the predictions if there are no metrics, otherwise we defer to # self.args.prediction_loss_only prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, ) finally: self.compute_metrics = compute_metrics if self.post_process_function is None or self.compute_metrics is None: return output predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict") metrics = self.compute_metrics(predictions) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys()): if not key.startswith(f"{metric_key_prefix}_"): metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key) return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics) def save_onnx(self, output_dir="./"): eval_dataset = self.eval_dataset eval_dataloader = self.get_eval_dataloader(eval_dataset) batch = next(iter(eval_dataloader)) # saving device - to make it consistent device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # convert to tuple input_tuple = tuple(v.to(device) for k, v in batch.items()) logger.info("Converting model to be onnx compatible") from pytorch_quantization.nn import TensorQuantizer TensorQuantizer.use_fb_fake_quant = True model = self.model.to(device) model.eval() model.float() model_to_save = model.module if hasattr(model, "module") else model quant_trainer.configure_model(model_to_save, self.quant_trainer_args) output_model_file = os.path.join(output_dir, "model.onnx") logger.info(f"exporting model to {output_model_file}") axes = {0: "batch_size", 1: "seq_len"} torch.onnx.export( model_to_save, input_tuple, output_model_file, export_params=True, opset_version=13, do_constant_folding=True, input_names=["input_ids", "attention_mask", "token_type_ids"], 
output_names=["output_start_logits", "output_end_logits"], dynamic_axes={ "input_ids": axes, "attention_mask": axes, "token_type_ids": axes, "output_start_logits": axes, "output_end_logits": axes, }, verbose=True, ) logger.info("onnx export finished")
transformers/examples/research_projects/quantization-qdqbert/trainer_quant_qa.py/0
{ "file_path": "transformers/examples/research_projects/quantization-qdqbert/trainer_quant_qa.py", "repo_id": "transformers", "token_count": 3711 }
# coding=utf-8 # Copyright 2022 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Self-training for sequence classification.""" import argparse import dataclasses import json import logging import os import shutil from typing import List, Optional import datasets from accelerate import Accelerator from datasets import load_dataset from finetuning import finetune from tqdm.auto import tqdm import transformers from transformers import AutoConfig, set_seed from transformers.trainer_utils import IntervalStrategy logger = logging.getLogger(__name__) MODEL_BIN_FILE = "pytorch_model.bin" @dataclasses.dataclass class STModelArguments: """Arguments pertaining to which config/tokenizer/model we are going to fine-tune from.""" model_name_or_path: str = dataclasses.field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."} ) cache_dir: Optional[str] = dataclasses.field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."}, ) @dataclasses.dataclass class STDataArguments: """Arguments pertaining to what data we are going to input our model for training and evaluation.""" train_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."}) infer_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."}) eval_file: Optional[str] = dataclasses.field( default=None, metadata={"help": "A csv or a json file containing the validation data."} ) task_name: Optional[str] = dataclasses.field( default=None, metadata={"help": "The name of the task to train on."}, ) label_list: Optional[List[str]] = dataclasses.field( default=None, metadata={"help": "The list of labels for the task."} ) @dataclasses.dataclass class STTrainingArguments: """Training arguments pertaining to the training loop itself.""" output_dir: str = dataclasses.field( metadata={"help": "The output directory where the model predictions and checkpoints will be written."} ) eval_metric: Optional[str] = dataclasses.field( default="accuracy", metadata={"help": "The evaluation metric used for the task."} ) eval_strategy: Optional[str] = dataclasses.field( default="no", metadata={ "help": 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]' }, ) early_stopping_patience: Optional[int] = dataclasses.field( default=10, metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."}, ) early_stopping_threshold: Optional[float] = dataclasses.field( default=0.0, metadata={ "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions." 
}, ) do_filter_by_confidence: Optional[bool] = dataclasses.field( default=False, metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."}, ) do_filter_by_val_performance: Optional[bool] = dataclasses.field( default=False, metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."}, ) finetune_on_labeled_data: Optional[bool] = dataclasses.field( default=False, metadata={"help": "Whether to fine-tune on labeled data after pseudo training."}, ) confidence_threshold: Optional[float] = dataclasses.field( default=0.0, metadata={"help": "Confidence threshold for pseudo-labeled data filtering."}, ) max_selftrain_iterations: Optional[int] = dataclasses.field( default=100, metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."}, ) seed: Optional[int] = dataclasses.field( default=None, metadata={"help": "Random seed for initialization."}, ) def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir): """Create pseudeo labeled data for the next self-training iteration.""" dataset = datasets.concatenate_datasets([infer_input, infer_output], axis=1) if args.do_filter_by_confidence: dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold) if args.do_filter_by_val_performance: assert eval_result >= 0.0 and eval_result <= 1.0 num_selected_rows = int(eval_result * len(dataset)) print(num_selected_rows) dataset = dataset.sort("probability", reverse=True) dataset = dataset.select(range(num_selected_rows)) dataset = dataset.remove_columns(["label", "probability"]) dataset = dataset.rename_column("prediction", "label") dataset = dataset.map(lambda example: {"label": id2label[example["label"]]}) dataset = dataset.shuffle(seed=args.seed) pseudo_labeled_data_file = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}") if args.data_file_extension == "csv": dataset.to_csv(pseudo_labeled_data_file, index=False) else: dataset.to_json(pseudo_labeled_data_file) def selftrain(model_name_or_path, train_file, infer_file, output_dir, **kwargs): """Self-training a pre-trained model on a downstream task. Args: model_name_or_path: Path to pretrained model or model identifier from huggingface.co/models. train_file: A csv or a json file containing the training data. infer_file: A csv or a json file containing the data to predict on. output_dir: The output directory where the model predictions and checkpoints will be written. **kwargs: Dictionary of key/value pairs with which to update the configuration object after loading. The values in kwargs of any keys which are configuration attributes will be used to override the loaded values. """ # Initialize the accelerator. We will let the accelerator handle device # placement for us. accelerator = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state) # Setup logging, we only want one process per machine to log things on the # screen. accelerator.is_local_main_process is only True for one process per # machine. 
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() model_args = STModelArguments(model_name_or_path=model_name_or_path) data_args = STDataArguments(train_file=train_file, infer_file=infer_file) training_args = STTrainingArguments(output_dir=output_dir) args = argparse.Namespace() for arg_class in (model_args, data_args, training_args): for key, value in vars(arg_class).items(): setattr(args, key, value) for key, value in kwargs.items(): if hasattr(args, key): setattr(args, key, value) # Sanity checks data_files = {} args.data_file_extension = None # You need to provide the training data and the data to predict on assert args.train_file is not None assert args.infer_file is not None data_files["train"] = args.train_file data_files["infer"] = args.infer_file if args.eval_strategy != IntervalStrategy.NO.value: assert args.eval_file is not None data_files["eval"] = args.eval_file for key in data_files: extension = data_files[key].split(".")[-1] assert extension in ["csv", "json"], f"`{key}_file` should be a csv or a json file." if args.data_file_extension is None: args.data_file_extension = extension else: assert extension == args.data_file_extension, f"`{key}_file` should be a {args.data_file_extension} file`." assert ( args.eval_metric in datasets.list_metrics() ), f"{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}." # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) logger.info("Creating the initial data directory for self-training...") data_dir_format = f"{args.output_dir}/self-train_iter-{{}}".format initial_data_dir = data_dir_format(0) if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) os.makedirs(initial_data_dir, exist_ok=True) accelerator.wait_for_everyone() best_iteration = None best_eval_result = None early_stopping_patience_counter = 0 should_training_stop = False # Show the progress bar progress_bar = tqdm(range(args.max_selftrain_iterations), disable=not accelerator.is_local_main_process) # Self-train for iteration in range(0, int(args.max_selftrain_iterations)): current_data_dir = data_dir_format(iteration) assert os.path.exists(current_data_dir) # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for # iteration > 0 current_output_dir = os.path.join(current_data_dir, "stage-1") arguments_dict = { "accelerator": accelerator, "model_name_or_path": args.model_name_or_path, "cache_dir": args.cache_dir, "do_train": True, "train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"], "do_eval": True if args.eval_file is not None else False, "eval_file": data_files["eval"], "do_predict": True, "infer_file": data_files["infer"], "task_name": args.task_name, "label_list": args.label_list, "output_dir": current_output_dir, "eval_metric": args.eval_metric, "eval_strategy": args.eval_strategy, "early_stopping_patience": args.early_stopping_patience, "early_stopping_threshold": args.early_stopping_threshold, "seed": args.seed, } # Add additional training arguments for key, value in kwargs.items(): if key not in arguments_dict and not hasattr(training_args, key): arguments_dict.update({key: value}) model_bin_file_path = os.path.join(current_output_dir, 
"best-checkpoint", MODEL_BIN_FILE) if os.path.exists(model_bin_file_path): logger.info( "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.", model_bin_file_path, iteration, ) else: logger.info("***** Running self-training: iteration: %d, stage: 1 *****", iteration) finetune(**arguments_dict) accelerator.wait_for_everyone() assert os.path.exists(model_bin_file_path) logger.info("Self-training job completed: iteration: %d, stage: 1.", iteration) if iteration > 0 and args.finetune_on_labeled_data: # Stage 2 (optional): fine-tuning on the original labeled data model_path = os.path.join(current_output_dir, "best-checkpoint") current_output_dir = os.path.join(current_data_dir, "stage-2") # Update arguments_dict arguments_dict["model_name_or_path"] = model_path arguments_dict["train_file"] = data_files["train"] arguments_dict["output_dir"] = current_output_dir model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE) if os.path.exists(model_bin_file_path): logger.info( "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.", model_bin_file_path, iteration, ) else: logger.info("***** Running self-training: iteration: %d, stage: 2 *****", iteration) finetune(**arguments_dict) accelerator.wait_for_everyone() assert os.path.exists(model_bin_file_path) logger.info("Self-training job completed: iteration: %d, stage: 2.", iteration) new_iteration = iteration next_data_dir = data_dir_format(iteration + 1) config = AutoConfig.from_pretrained(os.path.join(current_output_dir, "best-checkpoint")) id2label = config.id2label eval_results_file = os.path.join(current_output_dir, "eval_results_best-checkpoint.json") test_results_file = os.path.join(current_output_dir, "test_results_best-checkpoint.json") assert os.path.exists(eval_results_file) with open(eval_results_file, "r") as f: eval_result = float(json.load(f)[args.eval_metric]) infer_output_file = os.path.join(current_output_dir, "infer_output_best-checkpoint.csv") assert os.path.exists(infer_output_file) # Loading the dataset from local csv or json files. 
infer_input = load_dataset(args.data_file_extension, data_files={"data": data_files["infer"]})["data"] infer_output = load_dataset("csv", data_files={"data": infer_output_file})["data"] if accelerator.is_main_process: os.makedirs(next_data_dir, exist_ok=True) shutil.copy(eval_results_file, os.path.join(output_dir, f"eval_results_iter-{iteration}.json")) if os.path.exists(test_results_file): shutil.copy(eval_results_file, os.path.join(output_dir, f"test_results_iter-{iteration}.json")) create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir) accelerator.wait_for_everyone() data_files["train_pseudo"] = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}") if args.eval_strategy != IntervalStrategy.NO.value: new_eval_result = eval_result if best_iteration is None: best_iteration = new_iteration best_eval_result = new_eval_result else: if new_eval_result - best_eval_result > args.early_stopping_threshold: best_iteration = new_iteration best_eval_result = new_eval_result early_stopping_patience_counter = 0 else: if new_eval_result == best_eval_result: best_iteration = new_iteration best_eval_result = new_eval_result early_stopping_patience_counter += 1 if early_stopping_patience_counter >= args.early_stopping_patience: should_training_stop = True progress_bar.update(1) if should_training_stop: break if best_iteration is not None: # Save the best iteration logger.info("Best iteration: %d", best_iteration) logger.info("Best evaluation result: %s = %f", args.eval_metric, best_eval_result) accelerator.wait_for_everyone() if accelerator.is_main_process: shutil.copy( os.path.join(output_dir, f"eval_results_iter-{iteration}.json"), os.path.join(output_dir, "eval_results_best-iteration.json"), ) else: # Assume that the last iteration is the best logger.info("Best iteration: %d", args.max_selftrain_iterations - 1) logger.info("Best evaluation result: %s = %f", args.eval_metric, eval_result) accelerator.wait_for_everyone() if accelerator.is_main_process: shutil.copy( os.path.join(output_dir, f"eval_results_iter-{args.max_selftrain_iterations - 1}.json"), os.path.join(output_dir, "eval_results_best-iteration.json"), )
transformers/examples/research_projects/self-training-text-classification/selftraining.py/0
{ "file_path": "transformers/examples/research_projects/self-training-text-classification/selftraining.py", "repo_id": "transformers", "token_count": 6788 }
# Add parent directory to python path to access lightning_base.py export PYTHONPATH="../":"${PYTHONPATH}" python finetune.py \ --data_dir=$CNN_DIR \ --learning_rate=3e-5 \ --train_batch_size=$BS \ --eval_batch_size=$BS \ --output_dir=$OUTPUT_DIR \ --max_source_length=512 \ --max_target_length=56 \ --val_check_interval=0.1 --n_val=200 \ --do_train --do_predict \ "$@"
transformers/examples/research_projects/seq2seq-distillation/finetune_t5.sh/0
{ "file_path": "transformers/examples/research_projects/seq2seq-distillation/finetune_t5.sh", "repo_id": "transformers", "token_count": 148 }
import os from glob import glob import imageio import torch import torchvision import wandb from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan from loaders import load_vqgan from PIL import Image from torch import nn from transformers import CLIPModel, CLIPTokenizerFast from utils import get_device, get_timestamp, show_pil class ProcessorGradientFlow: """ This wraps the huggingface CLIP processor to allow backprop through the image processing step. The original processor forces conversion to PIL images, which is faster for image processing but breaks gradient flow. We call the original processor to get the text embeddings, but use our own image processing to keep images as torch tensors. """ def __init__(self, device: str = "cpu", clip_model: str = "openai/clip-vit-large-patch14") -> None: self.device = device self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model) self.image_mean = [0.48145466, 0.4578275, 0.40821073] self.image_std = [0.26862954, 0.26130258, 0.27577711] self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std) self.resize = torchvision.transforms.Resize(224) self.center_crop = torchvision.transforms.CenterCrop(224) def preprocess_img(self, images): images = self.resize(images) images = self.center_crop(images) images = self.normalize(images) return images def __call__(self, text=None, images=None, **kwargs): encoding = self.tokenizer(text=text, **kwargs) encoding["pixel_values"] = self.preprocess_img(images) encoding = {key: value.to(self.device) for (key, value) in encoding.items()} return encoding class VQGAN_CLIP(nn.Module): def __init__( self, iterations=10, lr=0.01, vqgan=None, vqgan_config=None, vqgan_checkpoint=None, clip=None, clip_preprocessor=None, device=None, log=False, save_vector=True, return_val="image", quantize=True, save_intermediate=False, show_intermediate=False, make_grid=False, ) -> None: """ Instantiate a VQGAN_CLIP model. If you want to use a custom VQGAN model, pass it as vqgan. """ super().__init__() self.latent = None self.device = device if device else get_device() if vqgan: self.vqgan = vqgan else: self.vqgan = load_vqgan(self.device, conf_path=vqgan_config, ckpt_path=vqgan_checkpoint) self.vqgan.eval() if clip: self.clip = clip else: self.clip = CLIPModel.from_pretrained("openai/clip-vit-base-patch32") self.clip.to(self.device) self.clip_preprocessor = ProcessorGradientFlow(device=self.device) self.iterations = iterations self.lr = lr self.log = log self.make_grid = make_grid self.return_val = return_val self.quantize = quantize self.latent_dim = self.vqgan.decoder.z_shape def make_animation(self, input_path=None, output_path=None, total_duration=5, extend_frames=True): """ Make an animation from the intermediate images saved during generation. By default, uses the images from the most recent generation created by the generate function. If you want to use images from a different generation, pass the path to the folder containing the images as input_path. 
""" images = [] if output_path is None: output_path = "./animation.gif" if input_path is None: input_path = self.save_path paths = sorted(glob(input_path + "/*")) if not len(paths): raise ValueError( "No images found in save path, aborting (did you pass save_intermediate=True to the generate" " function?)" ) if len(paths) == 1: print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)") frame_duration = total_duration / len(paths) durations = [frame_duration] * len(paths) if extend_frames: durations[0] = 1.5 durations[-1] = 3 for file_name in paths: if file_name.endswith(".png"): images.append(imageio.imread(file_name)) imageio.mimsave(output_path, images, duration=durations) print(f"gif saved to {output_path}") def _get_latent(self, path=None, img=None): if not (path or img): raise ValueError("Input either path or tensor") if img is not None: raise NotImplementedError x = preprocess(Image.open(path), target_image_size=256).to(self.device) x_processed = preprocess_vqgan(x) z, *_ = self.vqgan.encode(x_processed) return z def _add_vector(self, transform_vector): """Add a vector transform to the base latent and returns the resulting image.""" base_latent = self.latent.detach().requires_grad_() trans_latent = base_latent + transform_vector if self.quantize: z_q, *_ = self.vqgan.quantize(trans_latent) else: z_q = trans_latent return self.vqgan.decode(z_q) def _get_clip_similarity(self, prompts, image, weights=None): clip_inputs = self.clip_preprocessor(text=prompts, images=image, return_tensors="pt", padding=True) clip_outputs = self.clip(**clip_inputs) similarity_logits = clip_outputs.logits_per_image if weights is not None: similarity_logits = similarity_logits * weights return similarity_logits.sum() def _get_clip_loss(self, pos_prompts, neg_prompts, image): pos_logits = self._get_clip_similarity(pos_prompts["prompts"], image, weights=(1 / pos_prompts["weights"])) if neg_prompts: neg_logits = self._get_clip_similarity(neg_prompts["prompts"], image, weights=neg_prompts["weights"]) else: neg_logits = torch.tensor([1], device=self.device) loss = -torch.log(pos_logits) + torch.log(neg_logits) return loss def _optimize_CLIP(self, original_img, pos_prompts, neg_prompts): vector = torch.randn_like(self.latent, requires_grad=True, device=self.device) optim = torch.optim.Adam([vector], lr=self.lr) for i in range(self.iterations): optim.zero_grad() transformed_img = self._add_vector(vector) processed_img = loop_post_process(transformed_img) clip_loss = self._get_CLIP_loss(pos_prompts, neg_prompts, processed_img) print("CLIP loss", clip_loss) if self.log: wandb.log({"CLIP Loss": clip_loss}) clip_loss.backward(retain_graph=True) optim.step() if self.return_val == "image": yield custom_to_pil(transformed_img[0]) else: yield vector def _init_logging(self, positive_prompts, negative_prompts, image_path): wandb.init(reinit=True, project="face-editor") wandb.config.update({"Positive Prompts": positive_prompts}) wandb.config.update({"Negative Prompts": negative_prompts}) wandb.config.update({"lr": self.lr, "iterations": self.iterations}) if image_path: image = Image.open(image_path) image = image.resize((256, 256)) wandb.log("Original Image", wandb.Image(image)) def process_prompts(self, prompts): if not prompts: return [] processed_prompts = [] weights = [] if isinstance(prompts, str): prompts = [prompt.strip() for prompt in prompts.split("|")] for prompt in prompts: if isinstance(prompt, (tuple, list)): processed_prompt = prompt[0] weight = float(prompt[1]) 
elif ":" in prompt: processed_prompt, weight = prompt.split(":") weight = float(weight) else: processed_prompt = prompt weight = 1.0 processed_prompts.append(processed_prompt) weights.append(weight) return { "prompts": processed_prompts, "weights": torch.tensor(weights, device=self.device), } def generate( self, pos_prompts, neg_prompts=None, image_path=None, show_intermediate=True, save_intermediate=False, show_final=True, save_final=True, save_path=None, ): """Generate an image from the given prompts. If image_path is provided, the image is used as a starting point for the optimization. If image_path is not provided, a random latent vector is used as a starting point. You must provide at least one positive prompt, and optionally provide negative prompts. Prompts must be formatted in one of the following ways: - A single prompt as a string, e.g "A smiling woman" - A set of prompts separated by pipes: "A smiling woman | a woman with brown hair" - A set of prompts and their weights separated by colons: "A smiling woman:1 | a woman with brown hair: 3" (default weight is 1) - A list of prompts, e.g ["A smiling woman", "a woman with brown hair"] - A list of prompts and weights, e.g [("A smiling woman", 1), ("a woman with brown hair", 3)] """ if image_path: self.latent = self._get_latent(image_path) else: self.latent = torch.randn(self.latent_dim, device=self.device) if self.log: self._init_logging(pos_prompts, neg_prompts, image_path) assert pos_prompts, "You must provide at least one positive prompt." pos_prompts = self.process_prompts(pos_prompts) neg_prompts = self.process_prompts(neg_prompts) if save_final and save_path is None: save_path = os.path.join("./outputs/", "_".join(pos_prompts["prompts"])) if not os.path.exists(save_path): os.makedirs(save_path) else: save_path = save_path + "_" + get_timestamp() os.makedirs(save_path) self.save_path = save_path original_img = self.vqgan.decode(self.latent)[0] if show_intermediate: print("Original Image") show_pil(custom_to_pil(original_img)) original_img = loop_post_process(original_img) for iter, transformed_img in enumerate(self._optimize_CLIP(original_img, pos_prompts, neg_prompts)): if show_intermediate: show_pil(transformed_img) if save_intermediate: transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png")) if self.log: wandb.log({"Image": wandb.Image(transformed_img)}) if show_final: show_pil(transformed_img) if save_final: transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png"))
transformers/examples/research_projects/vqgan-clip/VQGAN_CLIP.py/0
{ "file_path": "transformers/examples/research_projects/vqgan-clip/VQGAN_CLIP.py", "repo_id": "transformers", "token_count": 4997 }
#!/usr/bin/env python # coding=utf-8 # Copyright The HuggingFace Team and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-tuning the library models for multiple choice. """ # You can also adapt this script on your own multiple choice task. Pointers for this are left as comments. import json import logging import os import sys from dataclasses import dataclass, field from itertools import chain from pathlib import Path from typing import Optional, Union import datasets import tensorflow as tf from datasets import load_dataset import transformers from transformers import ( CONFIG_NAME, TF2_WEIGHTS_NAME, AutoConfig, AutoTokenizer, DefaultDataCollator, HfArgumentParser, PushToHubCallback, TFAutoModelForMultipleChoice, TFTrainingArguments, create_optimizer, set_seed, ) from transformers.tokenization_utils_base import PreTrainedTokenizerBase from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.49.0.dev0") logger = logging.getLogger(__name__) # region Helper classes and functions @dataclass class DataCollatorForMultipleChoice: """ Data collator that will dynamically pad the inputs for multiple choice received. Args: tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]): The tokenizer used for encoding the data. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`): Select a strategy to pad the returned sequences (according to the model's padding side and padding index) among: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). max_length (`int`, *optional*): Maximum length of the returned list and optionally padding length (see above). pad_to_multiple_of (`int`, *optional*): If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta). 
""" tokenizer: PreTrainedTokenizerBase padding: Union[bool, str, PaddingStrategy] = True max_length: Optional[int] = None pad_to_multiple_of: Optional[int] = None def __call__(self, features): label_name = "label" if "label" in features[0].keys() else "labels" labels = [feature.pop(label_name) for feature in features] batch_size = len(features) num_choices = len(features[0]["input_ids"]) flattened_features = [ [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features ] flattened_features = list(chain(*flattened_features)) batch = self.tokenizer.pad( flattened_features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="np", ) # Un-flatten batch = {k: tf.reshape(v, (batch_size, num_choices, -1)) for k, v in batch.items()} # Add back labels batch["labels"] = tf.convert_to_tensor(labels, dtype=tf.int64) return batch # endregion # region Arguments @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, ) use_fast_tokenizer: bool = field( default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) token: str = field( default=None, metadata={ "help": ( "The token to use as HTTP bearer authorization for remote files. If not specified, will use the token " "generated when running `huggingface-cli login` (stored in `~/.huggingface`)." ) }, ) trust_remote_code: bool = field( default=False, metadata={ "help": ( "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option " "should only be set to `True` for repositories you trust and in which you have read the code, as it will " "execute code present on the Hub on your local machine." ) }, ) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. """ train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."}) validation_file: Optional[str] = field( default=None, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) max_seq_length: Optional[int] = field( default=None, metadata={ "help": ( "The maximum total input sequence length after tokenization. If passed, sequences longer " "than this will be truncated, sequences shorter will be padded." 
) }, ) pad_to_max_length: bool = field( default=False, metadata={ "help": ( "Whether to pad all samples to the maximum sentence length. " "If False, will pad the samples dynamically when batching to the maximum length in the batch. More " "efficient on GPU but very bad for TPU." ) }, ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) }, ) max_eval_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) }, ) def __post_init__(self): if self.train_file is not None: extension = self.train_file.split(".")[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if self.validation_file is not None: extension = self.validation_file.split(".")[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." # endregion def main(): # region Argument parsing # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_swag", model_args, data_args, framework="tensorflow") output_dir = Path(training_args.output_dir) output_dir.mkdir(parents=True, exist_ok=True) # endregion # region Logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) log_level = training_args.get_process_log_level() logger.setLevel(log_level) datasets.utils.logging.set_verbosity(log_level) transformers.utils.logging.set_verbosity(log_level) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # endregion # region Checkpoints checkpoint = None if len(os.listdir(training_args.output_dir)) > 0 and not training_args.overwrite_output_dir: if (output_dir / CONFIG_NAME).is_file() and (output_dir / TF2_WEIGHTS_NAME).is_file(): checkpoint = output_dir logger.info( f"Checkpoint detected, resuming training from checkpoint in {training_args.output_dir}. To avoid this" " behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) else: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to continue regardless." ) # endregion # Set seed before initializing model. 
set_seed(training_args.seed) # region Load datasets # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.train_file is not None or data_args.validation_file is not None: data_files = {} if data_args.train_file is not None: data_files["train"] = data_args.train_file extension = data_args.train_file.split(".")[-1] if data_args.validation_file is not None: data_files["validation"] = data_args.validation_file extension = data_args.validation_file.split(".")[-1] raw_datasets = load_dataset( extension, data_files=data_files, cache_dir=model_args.cache_dir, token=model_args.token, ) else: # Downloading and loading the swag dataset from the hub. raw_datasets = load_dataset( "swag", "regular", cache_dir=model_args.cache_dir, token=model_args.token, ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets. # When using your own dataset or a different dataset from swag, you will probably need to change this. ending_names = [f"ending{i}" for i in range(4)] context_name = "sent1" question_header_name = "sent2" # endregion # region Load model config and tokenizer if checkpoint is not None: config_path = training_args.output_dir elif model_args.config_name: config_path = model_args.config_name else: config_path = model_args.model_name_or_path # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. config = AutoConfig.from_pretrained( config_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) # endregion # region Dataset preprocessing if data_args.max_seq_length is None: max_seq_length = tokenizer.model_max_length if max_seq_length > 1024: logger.warning( f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). " "Picking 1024 instead. You can change that default value by passing --max_seq_length xxx." ) max_seq_length = 1024 else: if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the " f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." 
) max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length) def preprocess_function(examples): first_sentences = [[context] * 4 for context in examples[context_name]] question_headers = examples[question_header_name] second_sentences = [ [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers) ] # Flatten out first_sentences = list(chain(*first_sentences)) second_sentences = list(chain(*second_sentences)) # Tokenize tokenized_examples = tokenizer(first_sentences, second_sentences, truncation=True, max_length=max_seq_length) # Un-flatten data = {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()} return data if training_args.do_train: if "train" not in raw_datasets: raise ValueError("--do_train requires a train dataset") train_dataset = raw_datasets["train"] if data_args.max_train_samples is not None: max_train_samples = min(len(train_dataset), data_args.max_train_samples) train_dataset = train_dataset.select(range(max_train_samples)) train_dataset = train_dataset.map( preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, ) if training_args.do_eval: if "validation" not in raw_datasets: raise ValueError("--do_eval requires a validation dataset") eval_dataset = raw_datasets["validation"] if data_args.max_eval_samples is not None: max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples) eval_dataset = eval_dataset.select(range(max_eval_samples)) eval_dataset = eval_dataset.map( preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, ) if data_args.pad_to_max_length: data_collator = DefaultDataCollator(return_tensors="np") else: # custom class defined above, as HF has no data collator for multiple choice data_collator = DataCollatorForMultipleChoice(tokenizer) # endregion with training_args.strategy.scope(): # region Build model if checkpoint is None: model_path = model_args.model_name_or_path else: model_path = checkpoint model = TFAutoModelForMultipleChoice.from_pretrained( model_path, config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) num_replicas = training_args.strategy.num_replicas_in_sync total_train_batch_size = training_args.per_device_train_batch_size * num_replicas total_eval_batch_size = training_args.per_device_eval_batch_size * num_replicas if training_args.do_train: num_train_steps = (len(train_dataset) // total_train_batch_size) * int(training_args.num_train_epochs) if training_args.warmup_steps > 0: num_warmup_steps = training_args.warmup_steps elif training_args.warmup_ratio > 0: num_warmup_steps = int(num_train_steps * training_args.warmup_ratio) else: num_warmup_steps = 0 optimizer, lr_schedule = create_optimizer( init_lr=training_args.learning_rate, num_train_steps=num_train_steps, num_warmup_steps=num_warmup_steps, adam_beta1=training_args.adam_beta1, adam_beta2=training_args.adam_beta2, adam_epsilon=training_args.adam_epsilon, weight_decay_rate=training_args.weight_decay, adam_global_clipnorm=training_args.max_grad_norm, ) else: optimizer = "sgd" # Just write anything because we won't be using it # Transformers models compute the right loss for their task by default when labels are passed, and will # use this for training unless you specify your own loss function in compile(). 
model.compile(optimizer=optimizer, metrics=["accuracy"], jit_compile=training_args.xla) # endregion # region Preparing push_to_hub and model card push_to_hub_model_id = training_args.push_to_hub_model_id model_name = model_args.model_name_or_path.split("/")[-1] if not push_to_hub_model_id: push_to_hub_model_id = f"{model_name}-finetuned-multiplechoice" model_card_kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "multiple-choice"} if training_args.push_to_hub: callbacks = [ PushToHubCallback( output_dir=training_args.output_dir, hub_model_id=push_to_hub_model_id, hub_token=training_args.push_to_hub_token, tokenizer=tokenizer, **model_card_kwargs, ) ] else: callbacks = [] # endregion # region Training eval_metrics = None if training_args.do_train: dataset_options = tf.data.Options() dataset_options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF # model.prepare_tf_dataset() wraps a Hugging Face dataset in a tf.data.Dataset which is ready to use in # training. This is the recommended way to use a Hugging Face dataset when training with Keras. You can also # use the lower-level dataset.to_tf_dataset() method, but you will have to specify things like column names # yourself if you use this method, whereas they are automatically inferred from the model input names when # using model.prepare_tf_dataset() # For more info see the docs: # https://huggingface.co/docs/transformers/main/en/main_classes/model#transformers.TFPreTrainedModel.prepare_tf_dataset # https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset.to_tf_dataset tf_train_dataset = model.prepare_tf_dataset( train_dataset, shuffle=True, batch_size=total_train_batch_size, collate_fn=data_collator, ).with_options(dataset_options) if training_args.do_eval: validation_data = model.prepare_tf_dataset( eval_dataset, shuffle=False, batch_size=total_eval_batch_size, collate_fn=data_collator, drop_remainder=True, ).with_options(dataset_options) else: validation_data = None history = model.fit( tf_train_dataset, validation_data=validation_data, epochs=int(training_args.num_train_epochs), callbacks=callbacks, ) eval_metrics = {key: val[-1] for key, val in history.history.items()} # endregion # region Evaluation if training_args.do_eval and not training_args.do_train: dataset_options = tf.data.Options() dataset_options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF # Do a standalone evaluation pass tf_eval_dataset = model.prepare_tf_dataset( eval_dataset, shuffle=False, batch_size=total_eval_batch_size, collate_fn=data_collator, drop_remainder=True, ).with_options(dataset_options) eval_results = model.evaluate(tf_eval_dataset) eval_metrics = {"val_loss": eval_results[0], "val_accuracy": eval_results[1]} # endregion if eval_metrics is not None and training_args.output_dir is not None: output_eval_file = os.path.join(training_args.output_dir, "all_results.json") with open(output_eval_file, "w") as writer: writer.write(json.dumps(eval_metrics)) # region Push to hub if training_args.output_dir is not None and not training_args.push_to_hub: # If we're not pushing to hub, at least save a local copy when we're done model.save_pretrained(training_args.output_dir) # endregion if __name__ == "__main__": main()
transformers/examples/tensorflow/multiple-choice/run_swag.py/0
{ "file_path": "transformers/examples/tensorflow/multiple-choice/run_swag.py", "repo_id": "transformers", "token_count": 9932 }
<!---
Copyright 2021 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->

# Translation example

This script shows an example of training a *translation* model with the 🤗 Transformers library.
For straightforward use cases you may be able to use these scripts without modification, although we have also
included comments in the code to indicate areas that you may need to adapt to your own projects.

### Multi-GPU and TPU usage

By default, these scripts use a `MirroredStrategy` and will use multiple GPUs effectively if they are available. TPUs
can also be used by passing the name of the TPU resource with the `--tpu` argument.

### Example commands and caveats

MBart and some T5 models require special handling.

T5 models `google-t5/t5-small`, `google-t5/t5-base`, `google-t5/t5-large`, `google-t5/t5-3b` and `google-t5/t5-11b` must use an additional argument: `--source_prefix "translate {source_lang} to {target_lang}"`. For example:

```bash
python run_translation.py \
    --model_name_or_path google-t5/t5-small \
    --do_train \
    --do_eval \
    --source_lang en \
    --target_lang ro \
    --source_prefix "translate English to Romanian: " \
    --dataset_name wmt16 \
    --dataset_config_name ro-en \
    --output_dir /tmp/tst-translation \
    --per_device_train_batch_size=16 \
    --per_device_eval_batch_size=16 \
    --overwrite_output_dir
```

If you get a terrible BLEU score, make sure that you didn't forget to use the `--source_prefix` argument.

For the aforementioned group of T5 models, keep in mind that if you switch to a different language pair, you need to adjust the source and target values in all 3 language-specific command line arguments: `--source_lang`, `--target_lang` and `--source_prefix`.

MBart models require a different format for the `--source_lang` and `--target_lang` values: e.g. instead of `en` they expect `en_XX`, and for `ro` they expect `ro_RO`. The full MBart specification for language codes can be found [here](https://huggingface.co/facebook/mbart-large-cc25). For example:

```bash
python run_translation.py \
    --model_name_or_path facebook/mbart-large-en-ro \
    --do_train \
    --do_eval \
    --dataset_name wmt16 \
    --dataset_config_name ro-en \
    --source_lang en_XX \
    --target_lang ro_RO \
    --output_dir /tmp/tst-translation \
    --per_device_train_batch_size=16 \
    --per_device_eval_batch_size=16 \
    --overwrite_output_dir
```
transformers/examples/tensorflow/translation/README.md/0
{ "file_path": "transformers/examples/tensorflow/translation/README.md", "repo_id": "transformers", "token_count": 933 }
#!/usr/bin/env python
# coding: utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a model 50 times smaller than this, see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will then be used as "stas/tiny-wmt19-en-de"

# Build
from transformers import FSMTConfig, FSMTForConditionalGeneration, FSMTTokenizer

mname = "facebook/wmt19-en-de"

tokenizer = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
config = FSMTConfig.from_pretrained(mname)

config.update({
    "d_model": 4,
    "encoder_layers": 1, "decoder_layers": 1,
    "encoder_ffn_dim": 4, "decoder_ffn_dim": 4,
    "encoder_attention_heads": 1, "decoder_attention_heads": 1})

tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)

print("test output:", len(outputs.logits[0]))

# Save
mname_tiny = "tiny-wmt19-en-de"
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)

print(f"Generated {mname_tiny}")

# Upload
# transformers-cli upload tiny-wmt19-en-de
transformers/scripts/fsmt/fsmt-make-tiny-model.py/0
{ "file_path": "transformers/scripts/fsmt/fsmt-make-tiny-model.py", "repo_id": "transformers", "token_count": 704 }
#!/usr/bin/env python # coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import logging import re import time from typing import Any, Callable, Dict, List, Optional, Tuple, Union from .. import is_torch_available from ..utils import logging as transformers_logging from ..utils.import_utils import is_pygments_available from .agent_types import AgentAudio, AgentImage from .default_tools import BASE_PYTHON_TOOLS, FinalAnswerTool, setup_default_tools from .llm_engine import HfApiEngine, MessageRole from .monitoring import Monitor from .prompts import ( DEFAULT_CODE_SYSTEM_PROMPT, DEFAULT_REACT_CODE_SYSTEM_PROMPT, DEFAULT_REACT_JSON_SYSTEM_PROMPT, PLAN_UPDATE_FINAL_PLAN_REDACTION, PROMPTS_FOR_INITIAL_PLAN, PROMPTS_FOR_PLAN_UPDATE, SUPPORTED_PLAN_TYPES, SYSTEM_PROMPT_FACTS, SYSTEM_PROMPT_FACTS_UPDATE, USER_PROMPT_FACTS_UPDATE, ) from .python_interpreter import LIST_SAFE_MODULES, evaluate_python_code from .tools import ( DEFAULT_TOOL_DESCRIPTION_TEMPLATE, Tool, get_tool_description_with_args, load_tool, ) if is_pygments_available(): from pygments import highlight from pygments.formatters import Terminal256Formatter from pygments.lexers import PythonLexer class CustomFormatter(logging.Formatter): grey = "\x1b[38;20m" bold_yellow = "\x1b[33;1m" red = "\x1b[31;20m" green = "\x1b[32;20m" bold_green = "\x1b[32;20;1m" bold_red = "\x1b[31;1m" bold_white = "\x1b[37;1m" orange = "\x1b[38;5;214m" bold_orange = "\x1b[38;5;214;1m" reset = "\x1b[0m" format = "%(message)s" FORMATS = { logging.DEBUG: grey + format + reset, logging.INFO: format, logging.WARNING: bold_yellow + format + reset, logging.ERROR: red + format + reset, logging.CRITICAL: bold_red + format + reset, 31: reset + format + reset, 32: green + format + reset, 33: bold_green + format + reset, 34: bold_white + format + reset, 35: orange + format + reset, 36: bold_orange + format + reset, } def format(self, record): log_fmt = self.FORMATS.get(record.levelno) formatter = logging.Formatter(log_fmt) return formatter.format(record) logger = transformers_logging.get_logger(__name__) logger.propagate = False ch = logging.StreamHandler() ch.setFormatter(CustomFormatter()) logger.addHandler(ch) def parse_json_blob(json_blob: str) -> Dict[str, str]: try: first_accolade_index = json_blob.find("{") last_accolade_index = [a.start() for a in list(re.finditer("}", json_blob))][-1] json_blob = json_blob[first_accolade_index : last_accolade_index + 1].replace('\\"', "'") json_data = json.loads(json_blob, strict=False) return json_data except json.JSONDecodeError as e: place = e.pos if json_blob[place - 1 : place + 2] == "},\n": raise ValueError( "JSON is invalid: you probably tried to provide multiple tool calls in one action. PROVIDE ONLY ONE TOOL CALL." ) raise ValueError( f"The JSON blob you used is invalid due to the following error: {e}.\n" f"JSON blob was: {json_blob}, decoding failed on that specific part of the blob:\n" f"'{json_blob[place-4:place+5]}'." 
) except Exception as e: raise ValueError(f"Error in parsing the JSON blob: {e}") def parse_code_blob(code_blob: str) -> str: try: pattern = r"```(?:py|python)?\n(.*?)\n```" match = re.search(pattern, code_blob, re.DOTALL) return match.group(1).strip() except Exception as e: raise ValueError( f""" The code blob you used is invalid: due to the following error: {e} This means that the regex pattern {pattern} was not respected: make sure to include code with the correct pattern, for instance: Thoughts: Your thoughts Code: ```py # Your python code here ```<end_action>""" ) def parse_json_tool_call(json_blob: str) -> Tuple[str, Dict[str, str]]: json_blob = json_blob.replace("```json", "").replace("```", "") tool_call = parse_json_blob(json_blob) if "action" in tool_call and "action_input" in tool_call: return tool_call["action"], tool_call["action_input"] elif "action" in tool_call: return tool_call["action"], None else: raise ValueError( f"Missing keys: {[key for key in ['action', 'action_input'] if key not in tool_call]} in blob {tool_call}" ) def parse_text_tool_call(text: str) -> Tuple[str, Union[str, Dict[str, str]]]: """ Expects a text in the format: 'Action:', 'Action input:', 'Observation:'. 'Action input:' contains a json string with input arguments. """ try: if "Observation:" in text: text = text.split("Observation:")[0] if "Action:" in text: text = text.split("Action:")[1] tool_name, tool_input = text.split("Action input:") if "{" in tool_input: tool_input = parse_json_blob(tool_input) else: tool_input = tool_input.strip().replace('"', "") return tool_name.strip().replace('"', "").replace("\\", ""), tool_input except Exception as e: raise ValueError( f"Error in parsing the text tool call: {e}. Be sure to provide the correct format. DO NOT repeat your previous incorrect tool call." ) def to_text(input: Union[List[Dict[str, str]], Dict[str, str], str]) -> str: if isinstance(input, list): return "\n".join([m["content"] for m in input]) elif isinstance(input, dict): return input["content"] else: return input HUGGINGFACE_DEFAULT_TOOLS = {} _tools_are_initialized = False class Toolbox: """ The toolbox contains all tools that the agent can perform operations with, as well as a few methods to manage them. Args: tools (`List[Tool]`): The list of tools to instantiate the toolbox with add_base_tools (`bool`, defaults to `False`, *optional*, defaults to `False`): Whether to add the tools available within `transformers` to the toolbox. """ def __init__(self, tools: List[Tool], add_base_tools: bool = False): self._tools = {tool.name: tool for tool in tools} if add_base_tools: self.add_base_tools() self._load_tools_if_needed() def add_base_tools(self, add_python_interpreter: bool = False): global _tools_are_initialized global HUGGINGFACE_DEFAULT_TOOLS if not _tools_are_initialized: HUGGINGFACE_DEFAULT_TOOLS = setup_default_tools(logger) _tools_are_initialized = True for tool in HUGGINGFACE_DEFAULT_TOOLS.values(): if tool.name != "python_interpreter" or add_python_interpreter: self.add_tool(tool) self._load_tools_if_needed() @property def tools(self) -> Dict[str, Tool]: """Get all tools currently in the toolbox""" return self._tools def show_tool_descriptions(self, tool_description_template: str = None) -> str: """ Returns the description of all tools in the toolbox Args: tool_description_template (`str`, *optional*): The template to use to describe the tools. If not provided, the default template will be used. 
""" return "\n".join( [get_tool_description_with_args(tool, tool_description_template) for tool in self._tools.values()] ) def add_tool(self, tool: Tool): """ Adds a tool to the toolbox Args: tool (`Tool`): The tool to add to the toolbox. """ if tool.name in self._tools: raise KeyError(f"Error: tool '{tool.name}' already exists in the toolbox.") self._tools[tool.name] = tool def remove_tool(self, tool_name: str): """ Removes a tool from the toolbox Args: tool_name (`str`): The tool to remove from the toolbox. """ if tool_name not in self._tools: raise KeyError( f"Error: tool {tool_name} not found in toolbox for removal, should be instead one of {list(self._tools.keys())}." ) del self._tools[tool_name] def update_tool(self, tool: Tool): """ Updates a tool in the toolbox according to its name. Args: tool (`Tool`): The tool to update to the toolbox. """ if tool.name not in self._tools: raise KeyError( f"Error: tool {tool.name} not found in toolbox for update, should be instead one of {list(self._tools.keys())}." ) self._tools[tool.name] = tool def clear_toolbox(self): """Clears the toolbox""" self._tools = {} def _load_tools_if_needed(self): for name, tool in self._tools.items(): if not isinstance(tool, Tool): task_or_repo_id = tool.task if tool.repo_id is None else tool.repo_id self._tools[name] = load_tool(task_or_repo_id) def __repr__(self): toolbox_description = "Toolbox contents:\n" for tool in self._tools.values(): toolbox_description += f"\t{tool.name}: {tool.description}\n" return toolbox_description class AgentError(Exception): """Base class for other agent-related exceptions""" def __init__(self, message): super().__init__(message) self.message = message class AgentParsingError(AgentError): """Exception raised for errors in parsing in the agent""" pass class AgentExecutionError(AgentError): """Exception raised for errors in execution in the agent""" pass class AgentMaxIterationsError(AgentError): """Exception raised for errors in execution in the agent""" pass class AgentGenerationError(AgentError): """Exception raised for errors in generation in the agent""" pass def format_prompt_with_tools(toolbox: Toolbox, prompt_template: str, tool_description_template: str) -> str: tool_descriptions = toolbox.show_tool_descriptions(tool_description_template) prompt = prompt_template.replace("<<tool_descriptions>>", tool_descriptions) if "<<tool_names>>" in prompt: tool_names = [f"'{tool_name}'" for tool_name in toolbox.tools.keys()] prompt = prompt.replace("<<tool_names>>", ", ".join(tool_names)) return prompt def show_agents_descriptions(managed_agents: list): managed_agents_descriptions = """ You can also give requests to team members. Calling a team member works the same as for calling a tool: simply, the only argument you can give in the call is 'request', a long string explaning your request. Given that this team member is a real human, you should be very verbose in your request. 
Here is a list of the team members that you can call:""" for agent in managed_agents.values(): managed_agents_descriptions += f"\n- {agent.name}: {agent.description}" return managed_agents_descriptions def format_prompt_with_managed_agents_descriptions(prompt_template, managed_agents=None) -> str: if managed_agents is not None: return prompt_template.replace("<<managed_agents_descriptions>>", show_agents_descriptions(managed_agents)) else: return prompt_template.replace("<<managed_agents_descriptions>>", "") def format_prompt_with_imports(prompt_template: str, authorized_imports: List[str]) -> str: if "<<authorized_imports>>" not in prompt_template: raise AgentError("Tag '<<authorized_imports>>' should be provided in the prompt.") return prompt_template.replace("<<authorized_imports>>", str(authorized_imports)) class Agent: def __init__( self, tools: Union[List[Tool], Toolbox], llm_engine: Callable = None, system_prompt: Optional[str] = None, tool_description_template: Optional[str] = None, additional_args: Dict = {}, max_iterations: int = 6, tool_parser: Optional[Callable] = None, add_base_tools: bool = False, verbose: int = 0, grammar: Optional[Dict[str, str]] = None, managed_agents: Optional[List] = None, step_callbacks: Optional[List[Callable]] = None, monitor_metrics: bool = True, ): if system_prompt is None: system_prompt = DEFAULT_REACT_CODE_SYSTEM_PROMPT if tool_parser is None: tool_parser = parse_json_tool_call self.agent_name = self.__class__.__name__ self.llm_engine = llm_engine self.system_prompt_template = system_prompt self.tool_description_template = ( tool_description_template if tool_description_template else DEFAULT_TOOL_DESCRIPTION_TEMPLATE ) self.additional_args = additional_args self.max_iterations = max_iterations self.logger = logger self.tool_parser = tool_parser self.grammar = grammar self.managed_agents = None if managed_agents is not None: self.managed_agents = {agent.name: agent for agent in managed_agents} if isinstance(tools, Toolbox): self._toolbox = tools if add_base_tools: if not is_torch_available(): raise ImportError("Using the base tools requires torch to be installed.") self._toolbox.add_base_tools(add_python_interpreter=(self.__class__ == ReactJsonAgent)) else: self._toolbox = Toolbox(tools, add_base_tools=add_base_tools) self._toolbox.add_tool(FinalAnswerTool()) self.system_prompt = format_prompt_with_tools( self._toolbox, self.system_prompt_template, self.tool_description_template ) self.system_prompt = format_prompt_with_managed_agents_descriptions(self.system_prompt, self.managed_agents) self.prompt = None self.logs = [] self.task = None if verbose == 0: logger.setLevel(logging.WARNING) elif verbose == 1: logger.setLevel(logging.INFO) elif verbose == 2: logger.setLevel(logging.DEBUG) # Initialize step callbacks self.step_callbacks = step_callbacks if step_callbacks is not None else [] # Initialize Monitor if monitor_metrics is True self.monitor = None if monitor_metrics: self.monitor = Monitor(self.llm_engine) self.step_callbacks.append(self.monitor.update_metrics) @property def toolbox(self) -> Toolbox: """Get the toolbox currently available to the agent""" return self._toolbox def initialize_for_run(self): self.token_count = 0 self.system_prompt = format_prompt_with_tools( self._toolbox, self.system_prompt_template, self.tool_description_template, ) self.system_prompt = format_prompt_with_managed_agents_descriptions(self.system_prompt, self.managed_agents) if hasattr(self, "authorized_imports"): self.system_prompt = format_prompt_with_imports( 
self.system_prompt, list(set(LIST_SAFE_MODULES) | set(self.authorized_imports)) ) self.logs = [{"system_prompt": self.system_prompt, "task": self.task}] self.logger.log(33, "======== New task ========") self.logger.log(34, self.task) self.logger.debug("System prompt is as follows:") self.logger.debug(self.system_prompt) def write_inner_memory_from_logs(self, summary_mode: Optional[bool] = False) -> List[Dict[str, str]]: """ Reads past llm_outputs, actions, and observations or errors from the logs into a series of messages that can be used as input to the LLM. """ prompt_message = {"role": MessageRole.SYSTEM, "content": self.logs[0]["system_prompt"]} task_message = { "role": MessageRole.USER, "content": "Task: " + self.logs[0]["task"], } if summary_mode: memory = [task_message] else: memory = [prompt_message, task_message] for i, step_log in enumerate(self.logs[1:]): if "llm_output" in step_log and not summary_mode: thought_message = {"role": MessageRole.ASSISTANT, "content": step_log["llm_output"].strip()} memory.append(thought_message) if "facts" in step_log: thought_message = { "role": MessageRole.ASSISTANT, "content": "[FACTS LIST]:\n" + step_log["facts"].strip(), } memory.append(thought_message) if "plan" in step_log and not summary_mode: thought_message = {"role": MessageRole.ASSISTANT, "content": "[PLAN]:\n" + step_log["plan"].strip()} memory.append(thought_message) if "tool_call" in step_log and summary_mode: tool_call_message = { "role": MessageRole.ASSISTANT, "content": f"[STEP {i} TOOL CALL]: " + str(step_log["tool_call"]).strip(), } memory.append(tool_call_message) if "task" in step_log: tool_call_message = { "role": MessageRole.USER, "content": "New task:\n" + step_log["task"], } memory.append(tool_call_message) if "error" in step_log or "observation" in step_log: if "error" in step_log: message_content = ( f"[OUTPUT OF STEP {i}] -> Error:\n" + str(step_log["error"]) + "\nNow let's retry: take care not to repeat previous errors! If you have retried several times, try a completely different approach.\n" ) elif "observation" in step_log: message_content = f"[OUTPUT OF STEP {i}] -> Observation:\n{step_log['observation']}" tool_response_message = {"role": MessageRole.TOOL_RESPONSE, "content": message_content} memory.append(tool_response_message) return memory def get_succinct_logs(self): return [{key: value for key, value in log.items() if key != "agent_memory"} for log in self.logs] def extract_action(self, llm_output: str, split_token: str) -> str: """ Parse action from the LLM output Args: llm_output (`str`): Output of the LLM split_token (`str`): Separator for the action. Should match the example in the system prompt. """ try: split = llm_output.split(split_token) rationale, action = ( split[-2], split[-1], ) # NOTE: using indexes starting from the end solves for when you have more than one split_token in the output except Exception as e: self.logger.error(e, exc_info=1) raise AgentParsingError( f"Error: No '{split_token}' token provided in your output.\nYour output:\n{llm_output}\n. Be sure to include an action, prefaced with '{split_token}'!" ) return rationale.strip(), action.strip() def execute_tool_call(self, tool_name: str, arguments: Dict[str, str]) -> Any: """ Execute tool with the provided input and returns the result. This method replaces arguments with the actual values from the state if they refer to state variables. Args: tool_name (`str`): Name of the Tool to execute (should be one from self.toolbox). arguments (Dict[str, str]): Arguments passed to the Tool. 
""" available_tools = self.toolbox.tools if self.managed_agents is not None: available_tools = {**available_tools, **self.managed_agents} if tool_name not in available_tools: error_msg = f"Error: unknown tool {tool_name}, should be instead one of {list(available_tools.keys())}." self.logger.error(error_msg, exc_info=1) raise AgentExecutionError(error_msg) try: if isinstance(arguments, str): observation = available_tools[tool_name](arguments) elif isinstance(arguments, dict): for key, value in arguments.items(): # if the value is the name of a state variable like "image.png", replace it with the actual value if isinstance(value, str) and value in self.state: arguments[key] = self.state[value] observation = available_tools[tool_name](**arguments) else: raise AgentExecutionError( f"Arguments passed to tool should be a dict or string: got a {type(arguments)}." ) return observation except Exception as e: if tool_name in self.toolbox.tools: raise AgentExecutionError( f"Error in tool call execution: {e}\nYou should only use this tool with a correct input.\n" f"As a reminder, this tool's description is the following:\n{get_tool_description_with_args(available_tools[tool_name])}" ) elif tool_name in self.managed_agents: raise AgentExecutionError( f"Error in calling team member: {e}\nYou should only ask this team member with a correct request.\n" f"As a reminder, this team member's description is the following:\n{available_tools[tool_name]}" ) def log_rationale_code_action(self, rationale: str, code_action: str) -> None: self.logger.warning("=== Agent thoughts:") self.logger.log(31, rationale) self.logger.warning(">>> Agent is executing the code below:") if is_pygments_available(): self.logger.log( 31, highlight(code_action, PythonLexer(ensurenl=False), Terminal256Formatter(style="nord")) ) else: self.logger.log(31, code_action) self.logger.warning("====") def run(self, **kwargs): """To be implemented in the child class""" raise NotImplementedError class CodeAgent(Agent): """ A class for an agent that solves the given task using a single block of code. It plans all its actions, then executes all in one shot. """ def __init__( self, tools: List[Tool], llm_engine: Optional[Callable] = None, system_prompt: Optional[str] = None, tool_description_template: Optional[str] = None, grammar: Optional[Dict[str, str]] = None, additional_authorized_imports: Optional[List[str]] = None, **kwargs, ): if llm_engine is None: llm_engine = HfApiEngine() if system_prompt is None: system_prompt = DEFAULT_CODE_SYSTEM_PROMPT if tool_description_template is None: tool_description_template = DEFAULT_TOOL_DESCRIPTION_TEMPLATE super().__init__( tools=tools, llm_engine=llm_engine, system_prompt=system_prompt, tool_description_template=tool_description_template, grammar=grammar, **kwargs, ) if not is_pygments_available(): transformers_logging.warning_once( logger, "pygments isn't installed. Installing pygments will enable color syntax highlighting in the " "CodeAgent.", ) self.python_evaluator = evaluate_python_code self.additional_authorized_imports = additional_authorized_imports if additional_authorized_imports else [] self.authorized_imports = list(set(LIST_SAFE_MODULES) | set(self.additional_authorized_imports)) self.system_prompt = self.system_prompt.replace("<<authorized_imports>>", str(self.authorized_imports)) def parse_code_blob(self, result: str) -> str: """ Override this method if you want to change the way the code is cleaned in the `run` method. 
""" return parse_code_blob(result) def run(self, task: str, return_generated_code: bool = False, **kwargs): """ Runs the agent for the given task. Args: task (`str`): The task to perform return_generated_code (`bool`, *optional*, defaults to `False`): Whether to return the generated code instead of running it kwargs (additional keyword arguments, *optional*): Any keyword argument to send to the agent when evaluating the code. Example: ```py from transformers.agents import CodeAgent agent = CodeAgent(tools=[]) agent.run("What is the result of 2 power 3.7384?") ``` """ self.task = task if len(kwargs) > 0: self.task += f"\nYou have been provided with these initial arguments: {str(kwargs)}." self.state = kwargs.copy() self.initialize_for_run() # Run LLM prompt_message = {"role": MessageRole.SYSTEM, "content": self.system_prompt} task_message = { "role": MessageRole.USER, "content": "Task: " + self.task, } self.prompt = [prompt_message, task_message] self.logger.info("====Executing with this prompt====") self.logger.info(self.prompt) additional_args = {"grammar": self.grammar} if self.grammar is not None else {} llm_output = self.llm_engine(self.prompt, stop_sequences=["<end_action>"], **additional_args) if return_generated_code: return llm_output # Parse try: rationale, code_action = self.extract_action(llm_output=llm_output, split_token="Code:") except Exception as e: self.logger.debug( f"Error in extracting action, trying to parse the whole output as code. Error trace: {e}" ) rationale, code_action = "", llm_output try: code_action = self.parse_code_blob(code_action) except Exception as e: error_msg = f"Error in code parsing: {e}. Be sure to provide correct code" self.logger.error(error_msg, exc_info=1) return error_msg # Execute self.log_rationale_code_action(rationale, code_action) try: available_tools = {**BASE_PYTHON_TOOLS.copy(), **self.toolbox.tools} output = self.python_evaluator( code_action, static_tools=available_tools, custom_tools={}, state=self.state, authorized_imports=self.authorized_imports, ) self.logger.info(self.state["print_outputs"]) return output except Exception as e: error_msg = f"Error in execution: {e}. Be sure to provide correct code." self.logger.error(error_msg, exc_info=1) return error_msg class ReactAgent(Agent): """ This agent that solves the given task step by step, using the ReAct framework: While the objective is not reached, the agent will perform a cycle of thinking and acting. The action will be parsed from the LLM output: it consists in calls to tools from the toolbox, with arguments chosen by the LLM engine. 
""" def __init__( self, tools: List[Tool], llm_engine: Optional[Callable] = None, system_prompt: Optional[str] = None, tool_description_template: Optional[str] = None, grammar: Optional[Dict[str, str]] = None, plan_type: Optional[str] = None, planning_interval: Optional[int] = None, **kwargs, ): if llm_engine is None: llm_engine = HfApiEngine() if system_prompt is None: system_prompt = DEFAULT_REACT_CODE_SYSTEM_PROMPT if tool_description_template is None: tool_description_template = DEFAULT_TOOL_DESCRIPTION_TEMPLATE if plan_type is None: plan_type = SUPPORTED_PLAN_TYPES[0] else: assert plan_type in SUPPORTED_PLAN_TYPES, f"plan type {plan_type} is not supported" super().__init__( tools=tools, llm_engine=llm_engine, system_prompt=system_prompt, tool_description_template=tool_description_template, grammar=grammar, **kwargs, ) self.planning_interval = planning_interval self.plan_type = plan_type def provide_final_answer(self, task) -> str: """ This method provides a final answer to the task, based on the logs of the agent's interactions. """ self.prompt = [ { "role": MessageRole.SYSTEM, "content": "An agent tried to answer an user query but it got stuck and failed to do so. You are tasked with providing an answer instead. Here is the agent's memory:", } ] self.prompt += self.write_inner_memory_from_logs()[1:] self.prompt += [ { "role": MessageRole.USER, "content": f"Based on the above, please provide an answer to the following user request:\n{task}", } ] try: return self.llm_engine(self.prompt) except Exception as e: return f"Error in generating final llm output: {e}." def run(self, task: str, stream: bool = False, reset: bool = True, **kwargs): """ Runs the agent for the given task. Args: task (`str`): The task to perform Example: ```py from transformers.agents import ReactCodeAgent agent = ReactCodeAgent(tools=[]) agent.run("What is the result of 2 power 3.7384?") ``` """ self.task = task if len(kwargs) > 0: self.task += f"\nYou have been provided with these initial arguments: {str(kwargs)}." self.state = kwargs.copy() if reset: self.initialize_for_run() else: self.logs.append({"task": task}) if stream: return self.stream_run(task) else: return self.direct_run(task) def stream_run(self, task: str): """ Runs the agent in streaming mode, yielding steps as they are executed: should be launched only in the `run` method. """ final_answer = None iteration = 0 while final_answer is None and iteration < self.max_iterations: step_start_time = time.time() step_log_entry = {"iteration": iteration, "start_time": step_start_time} try: self.step(step_log_entry) if "final_answer" in step_log_entry: final_answer = step_log_entry["final_answer"] except AgentError as e: self.logger.error(e, exc_info=1) step_log_entry["error"] = e finally: step_end_time = time.time() step_log_entry["step_end_time"] = step_end_time step_log_entry["step_duration"] = step_end_time - step_start_time self.logs.append(step_log_entry) for callback in self.step_callbacks: callback(step_log_entry) iteration += 1 yield step_log_entry if final_answer is None and iteration == self.max_iterations: error_message = "Reached max iterations." 
final_step_log = {"error": AgentMaxIterationsError(error_message)} self.logs.append(final_step_log) self.logger.error(error_message, exc_info=1) final_answer = self.provide_final_answer(task) final_step_log["final_answer"] = final_answer final_step_log["step_duration"] = 0 for callback in self.step_callbacks: callback(final_step_log) yield final_step_log yield final_answer def direct_run(self, task: str): """ Runs the agent in direct mode, returning outputs only at the end: should be launched only in the `run` method. """ final_answer = None iteration = 0 while final_answer is None and iteration < self.max_iterations: step_start_time = time.time() step_log_entry = {"iteration": iteration, "start_time": step_start_time} try: if self.planning_interval is not None and iteration % self.planning_interval == 0: self.planning_step(task, is_first_step=(iteration == 0), iteration=iteration) self.step(step_log_entry) if "final_answer" in step_log_entry: final_answer = step_log_entry["final_answer"] except AgentError as e: self.logger.error(e, exc_info=1) step_log_entry["error"] = e finally: step_end_time = time.time() step_log_entry["step_end_time"] = step_end_time step_log_entry["step_duration"] = step_end_time - step_start_time self.logs.append(step_log_entry) for callback in self.step_callbacks: callback(step_log_entry) iteration += 1 if final_answer is None and iteration == self.max_iterations: error_message = "Reached max iterations." final_step_log = {"error": AgentMaxIterationsError(error_message)} self.logs.append(final_step_log) self.logger.error(error_message, exc_info=1) final_answer = self.provide_final_answer(task) final_step_log["final_answer"] = final_answer final_step_log["step_duration"] = 0 for callback in self.step_callbacks: callback(final_step_log) return final_answer def planning_step(self, task, is_first_step: bool = False, iteration: int = None): """ Used periodically by the agent to plan the next steps to reach the objective. Args: task (`str`): The task to perform is_first_step (`bool`): If this step is not the first one, the plan should be an update over a previous plan. iteration (`int`): The number of the current step, used as an indication for the LLM. 
""" if is_first_step: message_prompt_facts = {"role": MessageRole.SYSTEM, "content": SYSTEM_PROMPT_FACTS} message_prompt_task = { "role": MessageRole.USER, "content": f"""Here is the task: ``` {task} ``` Now begin!""", } answer_facts = self.llm_engine([message_prompt_facts, message_prompt_task]) message_system_prompt_plan = { "role": MessageRole.SYSTEM, "content": PROMPTS_FOR_INITIAL_PLAN[self.plan_type]["system"], } message_user_prompt_plan = { "role": MessageRole.USER, "content": PROMPTS_FOR_INITIAL_PLAN[self.plan_type]["user"].format( task=task, tool_descriptions=self._toolbox.show_tool_descriptions(self.tool_description_template), managed_agents_descriptions=( show_agents_descriptions(self.managed_agents) if self.managed_agents is not None else "" ), answer_facts=answer_facts, ), } answer_plan = self.llm_engine( [message_system_prompt_plan, message_user_prompt_plan], stop_sequences=["<end_plan>"] ) final_plan_redaction = f"""Here is the plan of action that I will follow to solve the task: ``` {answer_plan} ```""" final_facts_redaction = f"""Here are the facts that I know so far: ``` {answer_facts} ```""".strip() self.logs.append({"plan": final_plan_redaction, "facts": final_facts_redaction}) self.logger.log(36, "===== Initial plan =====") self.logger.log(35, final_plan_redaction) else: # update plan agent_memory = self.write_inner_memory_from_logs( summary_mode=False ) # This will not log the plan but will log facts # Redact updated facts facts_update_system_prompt = { "role": MessageRole.SYSTEM, "content": SYSTEM_PROMPT_FACTS_UPDATE, } facts_update_message = { "role": MessageRole.USER, "content": USER_PROMPT_FACTS_UPDATE, } facts_update = self.llm_engine([facts_update_system_prompt] + agent_memory + [facts_update_message]) # Redact updated plan plan_update_message = { "role": MessageRole.SYSTEM, "content": PROMPTS_FOR_PLAN_UPDATE[self.plan_type]["system"].format(task=task), } plan_update_message_user = { "role": MessageRole.USER, "content": PROMPTS_FOR_PLAN_UPDATE[self.plan_type]["user"].format( task=task, tool_descriptions=self._toolbox.show_tool_descriptions(self.tool_description_template), managed_agents_descriptions=( show_agents_descriptions(self.managed_agents) if self.managed_agents is not None else "" ), facts_update=facts_update, remaining_steps=(self.max_iterations - iteration), ), } plan_update = self.llm_engine( [plan_update_message] + agent_memory + [plan_update_message_user], stop_sequences=["<end_plan>"] ) # Log final facts and plan final_plan_redaction = PLAN_UPDATE_FINAL_PLAN_REDACTION.format(task=task, plan_update=plan_update) final_facts_redaction = f"""Here is the updated list of the facts that I know: ``` {facts_update} ```""" self.logs.append({"plan": final_plan_redaction, "facts": final_facts_redaction}) self.logger.log(36, "===== Updated plan =====") self.logger.log(35, final_plan_redaction) class ReactJsonAgent(ReactAgent): """ This agent that solves the given task step by step, using the ReAct framework: While the objective is not reached, the agent will perform a cycle of thinking and acting. The tool calls will be formulated by the LLM in JSON format, then parsed and executed. 
""" def __init__( self, tools: List[Tool], llm_engine: Optional[Callable] = None, system_prompt: Optional[str] = None, tool_description_template: Optional[str] = None, grammar: Optional[Dict[str, str]] = None, planning_interval: Optional[int] = None, **kwargs, ): if llm_engine is None: llm_engine = HfApiEngine() if system_prompt is None: system_prompt = DEFAULT_REACT_JSON_SYSTEM_PROMPT if tool_description_template is None: tool_description_template = DEFAULT_TOOL_DESCRIPTION_TEMPLATE super().__init__( tools=tools, llm_engine=llm_engine, system_prompt=system_prompt, tool_description_template=tool_description_template, grammar=grammar, planning_interval=planning_interval, **kwargs, ) def step(self, log_entry: Dict[str, Any]): """ Perform one step in the ReAct framework: the agent thinks, acts, and observes the result. The errors are raised here, they are caught and logged in the run() method. """ agent_memory = self.write_inner_memory_from_logs() self.prompt = agent_memory self.logger.debug("===== New step =====") # Add new step in logs log_entry["agent_memory"] = agent_memory.copy() self.logger.info("===== Calling LLM with this last message: =====") self.logger.info(self.prompt[-1]) try: additional_args = {"grammar": self.grammar} if self.grammar is not None else {} llm_output = self.llm_engine( self.prompt, stop_sequences=["<end_action>", "Observation:"], **additional_args ) except Exception as e: raise AgentGenerationError(f"Error in generating llm output: {e}.") self.logger.debug("===== Output message of the LLM: =====") self.logger.debug(llm_output) log_entry["llm_output"] = llm_output # Parse self.logger.debug("===== Extracting action =====") rationale, action = self.extract_action(llm_output=llm_output, split_token="Action:") try: tool_name, arguments = self.tool_parser(action) except Exception as e: raise AgentParsingError(f"Could not parse the given action: {e}.") log_entry["rationale"] = rationale log_entry["tool_call"] = {"tool_name": tool_name, "tool_arguments": arguments} # Execute self.logger.warning("=== Agent thoughts:") self.logger.log(31, rationale) self.logger.warning(f">>> Calling tool: '{tool_name}' with arguments: {arguments}") if tool_name == "final_answer": if isinstance(arguments, dict): if "answer" in arguments: answer = arguments["answer"] if ( isinstance(answer, str) and answer in self.state.keys() ): # if the answer is a state variable, return the value answer = self.state[answer] else: answer = arguments else: answer = arguments log_entry["final_answer"] = answer return answer else: if arguments is None: arguments = {} observation = self.execute_tool_call(tool_name, arguments) observation_type = type(observation) if observation_type in [AgentImage, AgentAudio]: if observation_type == AgentImage: observation_name = "image.png" elif observation_type == AgentAudio: observation_name = "audio.mp3" # TODO: observation naming could allow for different names of same type self.state[observation_name] = observation updated_information = f"Stored '{observation_name}' in memory." else: updated_information = str(observation).strip() self.logger.info(updated_information) log_entry["observation"] = updated_information return log_entry class ReactCodeAgent(ReactAgent): """ This agent that solves the given task step by step, using the ReAct framework: While the objective is not reached, the agent will perform a cycle of thinking and acting. The tool calls will be formulated by the LLM in code format, then parsed and executed. 
""" def __init__( self, tools: List[Tool], llm_engine: Optional[Callable] = None, system_prompt: Optional[str] = None, tool_description_template: Optional[str] = None, grammar: Optional[Dict[str, str]] = None, additional_authorized_imports: Optional[List[str]] = None, planning_interval: Optional[int] = None, **kwargs, ): if llm_engine is None: llm_engine = HfApiEngine() if system_prompt is None: system_prompt = DEFAULT_REACT_CODE_SYSTEM_PROMPT if tool_description_template is None: tool_description_template = DEFAULT_TOOL_DESCRIPTION_TEMPLATE super().__init__( tools=tools, llm_engine=llm_engine, system_prompt=system_prompt, tool_description_template=tool_description_template, grammar=grammar, planning_interval=planning_interval, **kwargs, ) if not is_pygments_available(): transformers_logging.warning_once( logger, "pygments isn't installed. Installing pygments will enable color syntax highlighting in the " "ReactCodeAgent.", ) self.python_evaluator = evaluate_python_code self.additional_authorized_imports = additional_authorized_imports if additional_authorized_imports else [] self.authorized_imports = list(set(LIST_SAFE_MODULES) | set(self.additional_authorized_imports)) self.system_prompt = self.system_prompt.replace("<<authorized_imports>>", str(self.authorized_imports)) self.custom_tools = {} def step(self, log_entry: Dict[str, Any]): """ Perform one step in the ReAct framework: the agent thinks, acts, and observes the result. The errors are raised here, they are caught and logged in the run() method. """ agent_memory = self.write_inner_memory_from_logs() self.prompt = agent_memory.copy() self.logger.debug("===== New step =====") # Add new step in logs log_entry["agent_memory"] = agent_memory.copy() self.logger.info("===== Calling LLM with these last messages: =====") self.logger.info(self.prompt[-2:]) try: additional_args = {"grammar": self.grammar} if self.grammar is not None else {} llm_output = self.llm_engine( self.prompt, stop_sequences=["<end_action>", "Observation:"], **additional_args ) except Exception as e: raise AgentGenerationError(f"Error in generating llm output: {e}.") self.logger.debug("=== Output message of the LLM:") self.logger.debug(llm_output) log_entry["llm_output"] = llm_output # Parse self.logger.debug("=== Extracting action ===") try: rationale, raw_code_action = self.extract_action(llm_output=llm_output, split_token="Code:") except Exception as e: self.logger.debug(f"Error in extracting action, trying to parse the whole output. Error trace: {e}") rationale, raw_code_action = llm_output, llm_output try: code_action = parse_code_blob(raw_code_action) except Exception as e: error_msg = f"Error in code parsing: {e}. 
Make sure to provide correct code" raise AgentParsingError(error_msg) log_entry["rationale"] = rationale log_entry["tool_call"] = {"tool_name": "code interpreter", "tool_arguments": code_action} # Execute self.log_rationale_code_action(rationale, code_action) try: static_tools = { **BASE_PYTHON_TOOLS.copy(), **self.toolbox.tools, } if self.managed_agents is not None: static_tools = {**static_tools, **self.managed_agents} result = self.python_evaluator( code_action, static_tools=static_tools, custom_tools=self.custom_tools, state=self.state, authorized_imports=self.authorized_imports, ) self.logger.warning("Print outputs:") self.logger.log(32, self.state["print_outputs"]) observation = "Print outputs:\n" + self.state["print_outputs"] if result is not None: self.logger.warning("Last output from code snippet:") self.logger.log(32, str(result)) observation += "Last output from code snippet:\n" + str(result)[:100000] log_entry["observation"] = observation except Exception as e: error_msg = f"Code execution failed due to the following error:\n{str(e)}" if "'dict' object has no attribute 'read'" in str(e): error_msg += "\nYou get this error because you passed a dict as input for one of the arguments instead of a string." raise AgentExecutionError(error_msg) for line in code_action.split("\n"): if line[: len("final_answer")] == "final_answer": self.logger.log(33, "Final answer:") self.logger.log(32, result) log_entry["final_answer"] = result return result LENGTH_TRUNCATE_REPORTS = 1000 class ManagedAgent: def __init__(self, agent, name, description, additional_prompting=None, provide_run_summary=False): self.agent = agent self.name = name self.description = description self.additional_prompting = additional_prompting self.provide_run_summary = provide_run_summary def write_full_task(self, task): full_task = f"""You're a helpful agent named '{self.name}'. You have been submitted this task by your manager. --- Task: {task} --- You're helping your manager solve a wider task: so make sure to not provide a one-line answer, but give as much information as possible so that they have a clear understanding of the answer. Your final_answer WILL HAVE to contain these parts: ### 1. Task outcome (short version): ### 2. Task outcome (extremely detailed version): ### 3. Additional context (if relevant): Put all these in your final_answer tool, everything that you do not pass as an argument to final_answer will be lost. And even if your task resolution is not successful, please return as much context as possible, so that your manager can act upon this feedback. 
<<additional_prompting>>"""
        if self.additional_prompting:
            full_task = full_task.replace("\n<<additional_prompting>>", self.additional_prompting).strip()
        else:
            full_task = full_task.replace("\n<<additional_prompting>>", "").strip()
        return full_task

    def __call__(self, request, **kwargs):
        full_task = self.write_full_task(request)
        output = self.agent.run(full_task, **kwargs)
        if self.provide_run_summary:
            answer = f"Here is the final answer from your managed agent '{self.name}':\n"
            answer += str(output)
            answer += f"\n\nFor more detail, find below a summary of this agent's work:\nSUMMARY OF WORK FROM AGENT '{self.name}':\n"
            for message in self.agent.write_inner_memory_from_logs(summary_mode=True):
                content = message["content"]
                if len(str(content)) < LENGTH_TRUNCATE_REPORTS or "[FACTS LIST]" in str(content):
                    answer += "\n" + str(content) + "\n---"
                else:
                    answer += (
                        "\n"
                        + str(content)[:LENGTH_TRUNCATE_REPORTS]
                        + "\n(...Step was truncated because it was too long...)\n---"
                    )
            answer += f"\nEND OF SUMMARY OF WORK FROM AGENT '{self.name}'."
            return answer
        else:
            return output
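

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): wrapping a
# worker agent in a ManagedAgent and handing it to a manager agent. It assumes
# the agent constructor accepts a `managed_agents` list (consumed above via
# `self.managed_agents`) and that a Hugging Face API token is configured for
# the default HfApiEngine. The names, description, and task are placeholders.
if __name__ == "__main__":
    worker = ReactCodeAgent(tools=[])  # add real Tool instances here
    managed_worker = ManagedAgent(
        agent=worker,
        name="research_worker",
        description="Handles sub-tasks delegated by the manager and reports back in detail.",
        provide_run_summary=True,
    )
    manager = ReactCodeAgent(tools=[], managed_agents=[managed_worker])
    print(manager.run("Summarize in two sentences what the ReAct framework does."))
# ---------------------------------------------------------------------------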
transformers/src/transformers/agents/agents.py/0
{ "file_path": "transformers/src/transformers/agents/agents.py", "repo_id": "transformers", "token_count": 22998 }
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Utilities to convert slow tokenizers in their fast tokenizers counterparts. All the conversions are grouped here to gather SentencePiece dependencies outside of the fast tokenizers files and allow to make our dependency on SentencePiece optional. """ import warnings from typing import Dict, List, Tuple from packaging import version from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, processors from tokenizers.models import BPE, Unigram, WordPiece from .utils import is_protobuf_available, is_sentencepiece_available, logging, requires_backends from .utils.import_utils import PROTOBUF_IMPORT_ERROR logger = logging.get_logger(__name__) def import_protobuf(error_message=""): if is_sentencepiece_available(): from sentencepiece import sentencepiece_model_pb2 return sentencepiece_model_pb2 if is_protobuf_available(): import google.protobuf if version.parse(google.protobuf.__version__) < version.parse("4.0.0"): from transformers.utils import sentencepiece_model_pb2 else: from transformers.utils import sentencepiece_model_pb2_new as sentencepiece_model_pb2 return sentencepiece_model_pb2 else: raise ImportError(PROTOBUF_IMPORT_ERROR.format(error_message)) def _get_prepend_scheme(add_prefix_space: bool, original_tokenizer) -> str: if add_prefix_space: prepend_scheme = "always" if not getattr(original_tokenizer, "legacy", True): prepend_scheme = "first" else: prepend_scheme = "never" return prepend_scheme def generate_merges(vocab, vocab_scores): reverse = vocab_scores is not None vocab_scores = dict(vocab_scores) if reverse else vocab merges = [] for merge, piece_score in vocab_scores.items(): local = [] for index in range(1, len(merge)): piece_l, piece_r = merge[:index], merge[index:] if piece_l in vocab and piece_r in vocab: local.append((piece_l, piece_r, piece_score)) local = sorted(local, key=lambda x: (vocab[x[0]], vocab[x[1]])) merges.extend(local) merges = sorted(merges, key=lambda val: (val[2], len(val[0]), len(val[1])), reverse=reverse) merges = [(val[0], val[1]) for val in merges] return merges class SentencePieceExtractor: """ Extractor implementation for SentencePiece trained models. https://github.com/google/sentencepiece """ def __init__(self, model: str): requires_backends(self, "sentencepiece") from sentencepiece import SentencePieceProcessor self.sp = SentencePieceProcessor() self.sp.Load(model) def extract(self, vocab_scores=None) -> Tuple[Dict[str, int], List[Tuple]]: """ By default will return vocab and merges with respect to their order, by sending `vocab_scores` we're going to order the merges with respect to the piece scores instead. 
""" sp = self.sp vocab = {sp.id_to_piece(index): index for index in range(sp.GetPieceSize())} merges = generate_merges(vocab, vocab_scores) return vocab, merges class GemmaSentencePieceExtractor(SentencePieceExtractor): def extract(self, vocab_scores=None) -> Tuple[Dict[str, int], List[Tuple]]: """ By default will return vocab and merges with respect to their order, by sending `vocab_scores` we're going to order the merges with respect to the piece scores instead. """ sp = self.sp vocab = {sp.id_to_piece(index): index for index in range(sp.GetPieceSize())} # there is a missing token in the vocab. We have to do this to support merges # "<0x09>" is the bytefallback for `\t` vocab["\t"] = vocab.get("<0x09>") merges = generate_merges(vocab, vocab_scores) return vocab, merges def check_number_comma(piece: str) -> bool: return len(piece) < 2 or piece[-1] != "," or not piece[-2].isdigit() class Converter: def __init__(self, original_tokenizer): self.original_tokenizer = original_tokenizer def converted(self) -> Tokenizer: raise NotImplementedError() class BertConverter(Converter): def converted(self) -> Tokenizer: vocab = self.original_tokenizer.vocab tokenizer = Tokenizer(WordPiece(vocab, unk_token=str(self.original_tokenizer.unk_token))) tokenize_chinese_chars = False strip_accents = False do_lower_case = False if hasattr(self.original_tokenizer, "basic_tokenizer"): tokenize_chinese_chars = self.original_tokenizer.basic_tokenizer.tokenize_chinese_chars strip_accents = self.original_tokenizer.basic_tokenizer.strip_accents do_lower_case = self.original_tokenizer.basic_tokenizer.do_lower_case tokenizer.normalizer = normalizers.BertNormalizer( clean_text=True, handle_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, lowercase=do_lower_case, ) tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer() cls = str(self.original_tokenizer.cls_token) sep = str(self.original_tokenizer.sep_token) cls_token_id = self.original_tokenizer.cls_token_id sep_token_id = self.original_tokenizer.sep_token_id tokenizer.post_processor = processors.TemplateProcessing( single=f"{cls}:0 $A:0 {sep}:0", pair=f"{cls}:0 $A:0 {sep}:0 $B:1 {sep}:1", special_tokens=[ (cls, cls_token_id), (sep, sep_token_id), ], ) tokenizer.decoder = decoders.WordPiece(prefix="##") return tokenizer class SplinterConverter(Converter): def converted(self) -> Tokenizer: vocab = self.original_tokenizer.vocab tokenizer = Tokenizer(WordPiece(vocab, unk_token=str(self.original_tokenizer.unk_token))) tokenize_chinese_chars = False strip_accents = False do_lower_case = False if hasattr(self.original_tokenizer, "basic_tokenizer"): tokenize_chinese_chars = self.original_tokenizer.basic_tokenizer.tokenize_chinese_chars strip_accents = self.original_tokenizer.basic_tokenizer.strip_accents do_lower_case = self.original_tokenizer.basic_tokenizer.do_lower_case tokenizer.normalizer = normalizers.BertNormalizer( clean_text=True, handle_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, lowercase=do_lower_case, ) tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer() cls = str(self.original_tokenizer.cls_token) sep = str(self.original_tokenizer.sep_token) question = str(self.original_tokenizer.question_token) dot = "." 
cls_token_id = self.original_tokenizer.cls_token_id sep_token_id = self.original_tokenizer.sep_token_id question_token_id = self.original_tokenizer.question_token_id dot_token_id = self.original_tokenizer.convert_tokens_to_ids(".") if self.original_tokenizer.padding_side == "right": pair = f"{cls}:0 $A:0 {question} {dot} {sep}:0 $B:1 {sep}:1" else: pair = f"{cls}:0 $A:0 {sep}:0 $B:1 {question} {dot} {sep}:1" tokenizer.post_processor = processors.TemplateProcessing( single=f"{cls}:0 $A:0 {sep}:0", pair=pair, special_tokens=[ (cls, cls_token_id), (sep, sep_token_id), (question, question_token_id), (dot, dot_token_id), ], ) tokenizer.decoder = decoders.WordPiece(prefix="##") return tokenizer class FunnelConverter(Converter): def converted(self) -> Tokenizer: vocab = self.original_tokenizer.vocab tokenizer = Tokenizer(WordPiece(vocab, unk_token=str(self.original_tokenizer.unk_token))) tokenize_chinese_chars = False strip_accents = False do_lower_case = False if hasattr(self.original_tokenizer, "basic_tokenizer"): tokenize_chinese_chars = self.original_tokenizer.basic_tokenizer.tokenize_chinese_chars strip_accents = self.original_tokenizer.basic_tokenizer.strip_accents do_lower_case = self.original_tokenizer.basic_tokenizer.do_lower_case tokenizer.normalizer = normalizers.BertNormalizer( clean_text=True, handle_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, lowercase=do_lower_case, ) tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer() cls = str(self.original_tokenizer.cls_token) sep = str(self.original_tokenizer.sep_token) cls_token_id = self.original_tokenizer.cls_token_id sep_token_id = self.original_tokenizer.sep_token_id tokenizer.post_processor = processors.TemplateProcessing( single=f"{cls}:2 $A:0 {sep}:0", # token_type_id is 2 for Funnel transformer pair=f"{cls}:2 $A:0 {sep}:0 $B:1 {sep}:1", special_tokens=[ (cls, cls_token_id), (sep, sep_token_id), ], ) tokenizer.decoder = decoders.WordPiece(prefix="##") return tokenizer class MPNetConverter(Converter): def converted(self) -> Tokenizer: vocab = self.original_tokenizer.vocab tokenizer = Tokenizer(WordPiece(vocab, unk_token=str(self.original_tokenizer.unk_token))) tokenize_chinese_chars = False strip_accents = False do_lower_case = False if hasattr(self.original_tokenizer, "basic_tokenizer"): tokenize_chinese_chars = self.original_tokenizer.basic_tokenizer.tokenize_chinese_chars strip_accents = self.original_tokenizer.basic_tokenizer.strip_accents do_lower_case = self.original_tokenizer.basic_tokenizer.do_lower_case tokenizer.normalizer = normalizers.BertNormalizer( clean_text=True, handle_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, lowercase=do_lower_case, ) tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer() cls = str(self.original_tokenizer.cls_token) sep = str(self.original_tokenizer.sep_token) cls_token_id = self.original_tokenizer.cls_token_id sep_token_id = self.original_tokenizer.sep_token_id tokenizer.post_processor = processors.TemplateProcessing( single=f"{cls}:0 $A:0 {sep}:0", pair=f"{cls}:0 $A:0 {sep}:0 {sep}:0 $B:1 {sep}:1", # MPNet uses two [SEP] tokens special_tokens=[ (cls, cls_token_id), (sep, sep_token_id), ], ) tokenizer.decoder = decoders.WordPiece(prefix="##") return tokenizer class OpenAIGPTConverter(Converter): def converted(self) -> Tokenizer: vocab = self.original_tokenizer.encoder merges = list(self.original_tokenizer.bpe_ranks.keys()) unk_token = self.original_tokenizer.unk_token tokenizer = Tokenizer( BPE( vocab=vocab, merges=merges, dropout=None, 
unk_token=str(unk_token), end_of_word_suffix="</w>", fuse_unk=False, ) ) if tokenizer.token_to_id(str(unk_token)) is not None: tokenizer.add_special_tokens([str(unk_token)]) tokenizer.normalizer = normalizers.BertNormalizer(lowercase=True) tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer() tokenizer.decoder = decoders.BPEDecoder(suffix="</w>") return tokenizer class GPT2Converter(Converter): def converted(self, vocab: Dict[str, int] = None, merges: List[Tuple[str, str]] = None) -> Tokenizer: if not vocab: vocab = self.original_tokenizer.encoder if not merges: merges = list(self.original_tokenizer.bpe_ranks) tokenizer = Tokenizer( BPE( vocab=vocab, merges=merges, dropout=None, continuing_subword_prefix="", end_of_word_suffix="", fuse_unk=False, ) ) add_prefix_space = getattr(self.original_tokenizer, "add_prefix_space", False) tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=add_prefix_space) tokenizer.decoder = decoders.ByteLevel() if getattr(self.original_tokenizer, "add_bos_token", False): bos = self.original_tokenizer.bos_token bos_token_id = self.original_tokenizer.bos_token_id tokenizer.post_processor = processors.TemplateProcessing( single=f"{bos}:0 $A:0", pair=f"{bos}:0 $A:0 $B:1", special_tokens=[ (bos, bos_token_id), ], ) else: # XXX trim_offsets=False actually means this post_processor doesn't # really do anything. tokenizer.post_processor = processors.ByteLevel(trim_offsets=False) return tokenizer class HerbertConverter(Converter): def converted(self) -> Tokenizer: tokenizer_info_str = "#version:" token_suffix = "</w>" vocab = self.original_tokenizer.encoder merges = list(self.original_tokenizer.bpe_ranks.keys()) if tokenizer_info_str in merges[0][0]: merges = merges[1:] tokenizer = Tokenizer( BPE( vocab, merges, dropout=None, unk_token=self.original_tokenizer.unk_token, end_of_word_suffix=token_suffix, ) ) tokenizer.normalizer = normalizers.BertNormalizer(lowercase=False, strip_accents=False) tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer() tokenizer.decoder = decoders.BPEDecoder(suffix=token_suffix) tokenizer.post_processor = processors.BertProcessing( sep=(self.original_tokenizer.sep_token, self.original_tokenizer.sep_token_id), cls=(self.original_tokenizer.cls_token, self.original_tokenizer.cls_token_id), ) return tokenizer class Qwen2Converter(Converter): def converted(self, vocab: Dict[str, int] = None, merges: List[Tuple[str, str]] = None) -> Tokenizer: if not vocab: vocab = self.original_tokenizer.encoder if not merges: merges = list(self.original_tokenizer.bpe_ranks.keys()) tokenizer = Tokenizer( BPE( vocab=vocab, merges=merges, dropout=None, unk_token=None, continuing_subword_prefix="", end_of_word_suffix="", fuse_unk=False, byte_fallback=False, ) ) tokenizer.normalizer = normalizers.NFC() tokenizer.pre_tokenizer = pre_tokenizers.Sequence( [ pre_tokenizers.Split( Regex( r"""(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\p{L}\p{N}]?\p{L}+|\p{N}| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+""" ), behavior="isolated", invert=False, ), pre_tokenizers.ByteLevel( add_prefix_space=getattr(self.original_tokenizer, "add_prefix_space", False), use_regex=False, ), ] ) tokenizer.decoder = decoders.ByteLevel() tokenizer.post_processor = processors.ByteLevel(trim_offsets=False) return tokenizer class RobertaConverter(Converter): def converted(self) -> Tokenizer: ot = self.original_tokenizer vocab = ot.encoder merges = list(ot.bpe_ranks.keys()) tokenizer = Tokenizer( BPE( vocab=vocab, merges=merges, dropout=None, continuing_subword_prefix="", 
end_of_word_suffix="", fuse_unk=False, ) ) tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=ot.add_prefix_space) tokenizer.decoder = decoders.ByteLevel() tokenizer.post_processor = processors.RobertaProcessing( sep=(ot.sep_token, ot.sep_token_id), cls=(ot.cls_token, ot.cls_token_id), add_prefix_space=ot.add_prefix_space, trim_offsets=True, # True by default on Roberta (historical) ) return tokenizer class RoFormerConverter(Converter): def converted(self) -> Tokenizer: from .models.roformer.tokenization_utils import JiebaPreTokenizer vocab = self.original_tokenizer.vocab tokenizer = Tokenizer(WordPiece(vocab, unk_token=str(self.original_tokenizer.unk_token))) strip_accents = False do_lower_case = False if hasattr(self.original_tokenizer, "basic_tokenizer"): strip_accents = self.original_tokenizer.basic_tokenizer.strip_accents do_lower_case = self.original_tokenizer.basic_tokenizer.do_lower_case tokenizer.normalizer = normalizers.BertNormalizer( clean_text=True, handle_chinese_chars=False, strip_accents=strip_accents, lowercase=do_lower_case, ) tokenizer.pre_tokenizer = pre_tokenizers.PreTokenizer.custom(JiebaPreTokenizer(vocab)) cls = str(self.original_tokenizer.cls_token) sep = str(self.original_tokenizer.sep_token) cls_token_id = self.original_tokenizer.cls_token_id sep_token_id = self.original_tokenizer.sep_token_id tokenizer.post_processor = processors.TemplateProcessing( single=f"{cls}:0 $A:0 {sep}:0", pair=f"{cls}:0 $A:0 {sep}:0 $B:1 {sep}:1", special_tokens=[ (cls, cls_token_id), (sep, sep_token_id), ], ) tokenizer.decoder = decoders.WordPiece(prefix="##") return tokenizer class DebertaConverter(Converter): def converted(self) -> Tokenizer: ot = self.original_tokenizer vocab = ot.encoder merges = list(ot.bpe_ranks.keys()) tokenizer = Tokenizer( BPE( vocab=vocab, merges=merges, dropout=None, continuing_subword_prefix="", end_of_word_suffix="", fuse_unk=False, ) ) tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=ot.add_prefix_space) tokenizer.decoder = decoders.ByteLevel() tokenizer.post_processor = processors.TemplateProcessing( single="[CLS]:0 $A:0 [SEP]:0", pair="[CLS]:0 $A:0 [SEP]:0 $B:1 [SEP]:1", special_tokens=[ ("[CLS]", self.original_tokenizer.convert_tokens_to_ids("[CLS]")), ("[SEP]", self.original_tokenizer.convert_tokens_to_ids("[SEP]")), ], ) return tokenizer class SpmConverter(Converter): handle_byte_fallback = False SpmExtractor = SentencePieceExtractor special_tokens = {} def __init__(self, *args): requires_backends(self, "protobuf") super().__init__(*args) # from .utils import sentencepiece_model_pb2 as model_pb2 model_pb2 = import_protobuf() m = model_pb2.ModelProto() with open(self.original_tokenizer.vocab_file, "rb") as f: m.ParseFromString(f.read()) self.proto = m if self.proto.trainer_spec.byte_fallback and not self.handle_byte_fallback: warnings.warn( "The sentencepiece tokenizer that you are converting to a fast tokenizer uses the byte fallback option" " which is not implemented in the fast tokenizers. In practice this means that the fast version of the" " tokenizer can produce unknown tokens whereas the sentencepiece version would have converted these " "unknown tokens into a sequence of byte tokens matching the original piece of text." 
) def vocab(self, proto): return [(piece.piece, piece.score) for piece in proto.pieces] def unk_id(self, proto): return proto.trainer_spec.unk_id def tokenizer(self, proto): model_type = proto.trainer_spec.model_type vocab_scores = self.vocab(proto) if model_type == 1: tokenizer = Tokenizer( Unigram( vocab_scores, unk_id=self.unk_id(proto), byte_fallback=self.handle_byte_fallback, ) ) elif model_type == 2: _, merges = self.SpmExtractor(self.original_tokenizer.vocab_file).extract(vocab_scores) bpe_vocab = {word: i for i, (word, score) in enumerate(vocab_scores)} tokenizer = Tokenizer( BPE( bpe_vocab, merges, unk_token=proto.trainer_spec.unk_piece, fuse_unk=True, byte_fallback=self.handle_byte_fallback, dropout=None, ) ) else: raise Exception( "You're trying to run a `Unigram` model but you're file was trained with a different algorithm" ) # control tokens are special # user defined symbols are not # both user and control tokens are AddedTokens # Add user defined symbols (type == 4) from sentencepiece (https://github.com/google/sentencepiece/blob/6225e08edb2577757163b3f5dbba4c0b670ef445/src/sentencepiece_model.proto#L299C29-L299C33) spm_added_tokens = [ (id, p.piece, p.type == 3 or p.piece in self.special_tokens) for id, p in enumerate(proto.pieces) if p.type in [3, 4] ] tokenizer.add_tokens( [ AddedToken(token, normalized=False, special=special) for id, token, special in sorted(spm_added_tokens, key=lambda x: x[0]) ] ) return tokenizer def normalizer(self, proto): precompiled_charsmap = proto.normalizer_spec.precompiled_charsmap _normalizers = [ normalizers.Strip(left=False, right=True), # stripping is important normalizers.Replace(Regex(" {2,}"), "▁"), ] if not precompiled_charsmap: return normalizers.Sequence(_normalizers) else: return normalizers.Sequence([normalizers.Precompiled(precompiled_charsmap)] + _normalizers) def pre_tokenizer(self, replacement, add_prefix_space): prepend_scheme = _get_prepend_scheme(add_prefix_space, self.original_tokenizer) return pre_tokenizers.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme) def post_processor(self): return None def decoder(self, replacement, add_prefix_space): prepend_scheme = _get_prepend_scheme(add_prefix_space, self.original_tokenizer) return decoders.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme) def converted(self) -> Tokenizer: tokenizer = self.tokenizer(self.proto) # Tokenizer assemble normalizer = self.normalizer(self.proto) if normalizer is not None: tokenizer.normalizer = normalizer replacement = "▁" add_prefix_space = True if hasattr(self.original_tokenizer, "add_prefix_space"): add_prefix_space = self.original_tokenizer.add_prefix_space pre_tokenizer = self.pre_tokenizer(replacement, add_prefix_space) if pre_tokenizer is not None: tokenizer.pre_tokenizer = pre_tokenizer tokenizer.decoder = self.decoder(replacement, add_prefix_space) post_processor = self.post_processor() if post_processor: tokenizer.post_processor = post_processor return tokenizer class AlbertConverter(SpmConverter): def vocab(self, proto): return [ (piece.piece, piece.score) if check_number_comma(piece.piece) else (piece.piece, piece.score - 100) for piece in proto.pieces ] def normalizer(self, proto): list_normalizers = [ normalizers.Replace("``", '"'), normalizers.Replace("''", '"'), ] if not self.original_tokenizer.keep_accents: list_normalizers.append(normalizers.NFKD()) list_normalizers.append(normalizers.StripAccents()) if self.original_tokenizer.do_lower_case: list_normalizers.append(normalizers.Lowercase()) 
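        # Finally, apply SentencePiece's precompiled character map (if present) and collapse repeated spaces.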
precompiled_charsmap = proto.normalizer_spec.precompiled_charsmap if precompiled_charsmap: list_normalizers.append(normalizers.Precompiled(precompiled_charsmap)) list_normalizers.append(normalizers.Replace(Regex(" {2,}"), " ")) return normalizers.Sequence(list_normalizers) def post_processor(self): return processors.TemplateProcessing( single="[CLS]:0 $A:0 [SEP]:0", pair="[CLS]:0 $A:0 [SEP]:0 $B:1 [SEP]:1", special_tokens=[ ("[CLS]", self.original_tokenizer.convert_tokens_to_ids("[CLS]")), ("[SEP]", self.original_tokenizer.convert_tokens_to_ids("[SEP]")), ], ) class BarthezConverter(SpmConverter): def unk_id(self, proto): unk_id = 3 return unk_id def post_processor(self): return processors.TemplateProcessing( single="<s> $A </s>", pair="<s> $A </s> </s> $B </s>", special_tokens=[ ("<s>", self.original_tokenizer.convert_tokens_to_ids("<s>")), ("</s>", self.original_tokenizer.convert_tokens_to_ids("</s>")), ], ) class CamembertConverter(SpmConverter): def vocab(self, proto): vocab = [ ("<s>NOTUSED", 0.0), ("<pad>", 0.0), ("</s>NOTUSED", 0.0), ("<unk>", 0.0), ("<unk>NOTUSED", -100), ] # We down-grade the original SentencePiece by -100 to avoid using it and use our added token instead vocab += [(piece.piece, piece.score) for piece in proto.pieces[1:]] vocab += [("<mask>", 0.0)] return vocab def unk_id(self, proto): # See vocab unk position return 3 def post_processor(self): return processors.TemplateProcessing( single="<s> $A </s>", pair="<s> $A </s> </s> $B </s>", special_tokens=[ ("<s>", self.original_tokenizer.convert_tokens_to_ids("<s>")), ("</s>", self.original_tokenizer.convert_tokens_to_ids("</s>")), ], ) class DebertaV2Converter(SpmConverter): def pre_tokenizer(self, replacement, add_prefix_space): list_pretokenizers = [] if self.original_tokenizer.split_by_punct: list_pretokenizers.append(pre_tokenizers.Punctuation(behavior="isolated")) prepend_scheme = _get_prepend_scheme(add_prefix_space, self.original_tokenizer) list_pretokenizers.append(pre_tokenizers.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme)) return pre_tokenizers.Sequence(list_pretokenizers) def normalizer(self, proto): list_normalizers = [] if self.original_tokenizer.do_lower_case: list_normalizers.append(normalizers.Lowercase()) list_normalizers.append(normalizers.Strip()) precompiled_charsmap = proto.normalizer_spec.precompiled_charsmap if precompiled_charsmap: list_normalizers.append(normalizers.Precompiled(precompiled_charsmap)) list_normalizers.append(normalizers.Replace(Regex(" {2,}"), " ")) return normalizers.Sequence(list_normalizers) def post_processor(self): return processors.TemplateProcessing( single="[CLS]:0 $A:0 [SEP]:0", pair="[CLS]:0 $A:0 [SEP]:0 $B:1 [SEP]:1", special_tokens=[ ("[CLS]", self.original_tokenizer.convert_tokens_to_ids("[CLS]")), ("[SEP]", self.original_tokenizer.convert_tokens_to_ids("[SEP]")), ], ) class MBartConverter(SpmConverter): def vocab(self, proto): vocab = [ ("<s>", 0.0), ("<pad>", 0.0), ("</s>", 0.0), ("<unk>", 0.0), ] vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]] vocab += [ ("ar_AR", 0.0), ("cs_CZ", 0.0), ("de_DE", 0.0), ("en_XX", 0.0), ("es_XX", 0.0), ("et_EE", 0.0), ("fi_FI", 0.0), ("fr_XX", 0.0), ("gu_IN", 0.0), ("hi_IN", 0.0), ("it_IT", 0.0), ("ja_XX", 0.0), ("kk_KZ", 0.0), ("ko_KR", 0.0), ("lt_LT", 0.0), ("lv_LV", 0.0), ("my_MM", 0.0), ("ne_NP", 0.0), ("nl_XX", 0.0), ("ro_RO", 0.0), ("ru_RU", 0.0), ("si_LK", 0.0), ("tr_TR", 0.0), ("vi_VN", 0.0), ("zh_CN", 0.0), ] vocab += [("<mask>", 0.0)] return vocab def unk_id(self, proto): return 3 
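    # MBart uses suffix special tokens: each sequence is followed by </s> and a language code (en_XX by default).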
def post_processor(self): return processors.TemplateProcessing( single="$A </s> en_XX", pair="$A $B </s> en_XX", special_tokens=[ ("en_XX", self.original_tokenizer.convert_tokens_to_ids("en_XX")), ("</s>", self.original_tokenizer.convert_tokens_to_ids("</s>")), ], ) class MBart50Converter(SpmConverter): def vocab(self, proto): vocab = [ ("<s>", 0.0), ("<pad>", 0.0), ("</s>", 0.0), ("<unk>", 0.0), ] vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]] vocab += [("ar_AR", 0.0), ("cs_CZ", 0.0), ("de_DE", 0.0), ("en_XX", 0.0), ("es_XX", 0.0), ("et_EE", 0.0), ("fi_FI", 0.0), ("fr_XX", 0.0), ("gu_IN", 0.0), ("hi_IN", 0.0), ("it_IT", 0.0), ("ja_XX", 0.0), ("kk_KZ", 0.0), ("ko_KR", 0.0), ("lt_LT", 0.0), ("lv_LV", 0.0), ("my_MM", 0.0), ("ne_NP", 0.0), ("nl_XX", 0.0), ("ro_RO", 0.0), ("ru_RU", 0.0), ("si_LK", 0.0), ("tr_TR", 0.0), ("vi_VN", 0.0), ("zh_CN", 0.0), ("af_ZA", 0.0), ("az_AZ", 0.0), ("bn_IN", 0.0), ("fa_IR", 0.0), ("he_IL", 0.0), ("hr_HR", 0.0), ("id_ID", 0.0), ("ka_GE", 0.0), ("km_KH", 0.0), ("mk_MK", 0.0), ("ml_IN", 0.0), ("mn_MN", 0.0), ("mr_IN", 0.0), ("pl_PL", 0.0), ("ps_AF", 0.0), ("pt_XX", 0.0), ("sv_SE", 0.0), ("sw_KE", 0.0), ("ta_IN", 0.0), ("te_IN", 0.0), ("th_TH", 0.0), ("tl_XX", 0.0), ("uk_UA", 0.0), ("ur_PK", 0.0), ("xh_ZA", 0.0), ("gl_ES", 0.0), ("sl_SI", 0.0)] # fmt: skip vocab += [("<mask>", 0.0)] return vocab def unk_id(self, proto): return 3 def post_processor(self): return processors.TemplateProcessing( single="en_XX $A </s>", pair="en_XX $A $B </s>", special_tokens=[ ("en_XX", self.original_tokenizer.convert_tokens_to_ids("en_XX")), ("</s>", self.original_tokenizer.convert_tokens_to_ids("</s>")), ], ) class NllbConverter(SpmConverter): def vocab(self, proto): vocab = [ ("<s>", 0.0), ("<pad>", 0.0), ("</s>", 0.0), ("<unk>", 0.0), ] vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]] return vocab def unk_id(self, proto): return 3 def post_processor(self): return processors.TemplateProcessing( single="eng_Latn $A </s>", pair="eng_Latn $A $B </s>", special_tokens=[ ("eng_Latn", self.original_tokenizer.convert_tokens_to_ids("eng_Latn")), ("</s>", self.original_tokenizer.convert_tokens_to_ids("</s>")), ], ) class SeamlessM4TConverter(SpmConverter): def vocab(self, proto): vocab = [ ("<pad>", 0.0), ("<unk>", 0.0), ("<s>", 0.0), ("</s>", 0.0), ] vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]] return vocab def unk_id(self, proto): return self.original_tokenizer.unk_token_id def post_processor(self): return processors.TemplateProcessing( single="__eng__ $A </s>", pair="__eng__ $A $B </s>", special_tokens=[ ("__eng__", self.original_tokenizer.convert_tokens_to_ids("__eng__")), ("</s>", self.original_tokenizer.convert_tokens_to_ids("</s>")), ], ) class XLMRobertaConverter(SpmConverter): def vocab(self, proto): vocab = [ ("<s>", 0.0), ("<pad>", 0.0), ("</s>", 0.0), ("<unk>", 0.0), ] vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]] vocab += [("<mask>", 0.0)] return vocab def unk_id(self, proto): unk_id = 3 return unk_id def post_processor(self): return processors.TemplateProcessing( single="<s> $A </s>", pair="<s> $A </s> </s> $B </s>", special_tokens=[ ("<s>", self.original_tokenizer.convert_tokens_to_ids("<s>")), ("</s>", self.original_tokenizer.convert_tokens_to_ids("</s>")), ], ) class XLNetConverter(SpmConverter): def vocab(self, proto): return [ (piece.piece, piece.score) if check_number_comma(piece.piece) else (piece.piece, piece.score - 100) for piece in proto.pieces ] def normalizer(self, proto): 
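        # Same normalization pipeline as AlbertConverter: unify quote characters, optionally
        # strip accents and lowercase, then apply the precompiled charsmap and collapse repeated spaces.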
list_normalizers = [ normalizers.Replace("``", '"'), normalizers.Replace("''", '"'), ] if not self.original_tokenizer.keep_accents: list_normalizers.append(normalizers.NFKD()) list_normalizers.append(normalizers.StripAccents()) if self.original_tokenizer.do_lower_case: list_normalizers.append(normalizers.Lowercase()) precompiled_charsmap = proto.normalizer_spec.precompiled_charsmap if precompiled_charsmap: list_normalizers.append(normalizers.Precompiled(precompiled_charsmap)) list_normalizers.append(normalizers.Replace(Regex(" {2,}"), " ")) return normalizers.Sequence(list_normalizers) def post_processor(self): return processors.TemplateProcessing( single="$A:0 <sep>:0 <cls>:2", pair="$A:0 <sep>:0 $B:1 <sep>:1 <cls>:2", special_tokens=[ ("<sep>", self.original_tokenizer.convert_tokens_to_ids("<sep>")), ("<cls>", self.original_tokenizer.convert_tokens_to_ids("<cls>")), ], ) class ReformerConverter(SpmConverter): pass class RemBertConverter(SpmConverter): # Inspired from AlbertConverter def normalizer(self, proto): list_normalizers = [ normalizers.Replace("``", '"'), normalizers.Replace("''", '"'), normalizers.Replace(Regex(" {2,}"), " "), ] if not self.original_tokenizer.keep_accents: list_normalizers.append(normalizers.NFKD()) list_normalizers.append(normalizers.StripAccents()) if self.original_tokenizer.do_lower_case: list_normalizers.append(normalizers.Lowercase()) precompiled_charsmap = proto.normalizer_spec.precompiled_charsmap if precompiled_charsmap: list_normalizers.append(normalizers.Precompiled(precompiled_charsmap)) return normalizers.Sequence(list_normalizers) def post_processor(self): return processors.TemplateProcessing( single="[CLS]:0 $A:0 [SEP]:0", pair="[CLS]:0 $A:0 [SEP]:0 $B:1 [SEP]:1", special_tokens=[ ("[CLS]", self.original_tokenizer.convert_tokens_to_ids("[CLS]")), ("[SEP]", self.original_tokenizer.convert_tokens_to_ids("[SEP]")), ], ) class BertGenerationConverter(SpmConverter): pass class PegasusConverter(SpmConverter): def vocab(self, proto): vocab = [ (self.original_tokenizer.pad_token, 0.0), (self.original_tokenizer.eos_token, 0.0), ] if self.original_tokenizer.mask_token_sent is not None: vocab += [(self.original_tokenizer.mask_token_sent, 0.0)] if ( self.original_tokenizer.mask_token is not None and self.original_tokenizer.mask_token_id < self.original_tokenizer.offset ): vocab += [(self.original_tokenizer.mask_token, 0.0)] vocab += [(f"<unk_{i}>", -100.0) for i in range(2, self.original_tokenizer.offset)] vocab += [(piece.piece, piece.score) for piece in proto.pieces[2:]] return vocab def unk_id(self, proto): return proto.trainer_spec.unk_id + self.original_tokenizer.offset def pre_tokenizer(self, replacement, add_prefix_space): prepend_scheme = _get_prepend_scheme(add_prefix_space, self.original_tokenizer) return pre_tokenizers.Sequence( [ pre_tokenizers.WhitespaceSplit(), pre_tokenizers.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme), ] ) def post_processor(self): eos = self.original_tokenizer.eos_token special_tokens = [ (eos, self.original_tokenizer.eos_token_id), ] return processors.TemplateProcessing(single=["$A", eos], pair=["$A", "$B", eos], special_tokens=special_tokens) class T5Converter(SpmConverter): def vocab(self, proto): num_extra_ids = self.original_tokenizer._extra_ids vocab = [(piece.piece, piece.score) for piece in proto.pieces] vocab += [(f"<extra_id_{i}>", 0.0) for i in range(num_extra_ids - 1, -1, -1)] return vocab def post_processor(self): return processors.TemplateProcessing( single=["$A", "</s>"], pair=["$A", "</s>", 
"$B", "</s>"], special_tokens=[ ("</s>", self.original_tokenizer.convert_tokens_to_ids("</s>")), ], ) class UdopConverter(SpmConverter): def post_processor(self): return processors.TemplateProcessing( single=["$A", "</s>"], pair=["$A", "</s>", "$B", "</s>"], special_tokens=[ ("</s>", self.original_tokenizer.convert_tokens_to_ids("</s>")), ], ) class WhisperConverter(Converter): def converted(self) -> Tokenizer: vocab = self.original_tokenizer.encoder merges = list(self.original_tokenizer.bpe_ranks.keys()) tokenizer = Tokenizer( BPE( vocab=vocab, merges=merges, dropout=None, continuing_subword_prefix="", end_of_word_suffix="", fuse_unk=False, ) ) tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=self.original_tokenizer.add_prefix_space) tokenizer.decoder = decoders.ByteLevel() prefix_token_ids = self.original_tokenizer.prefix_tokens prefixes = self.original_tokenizer.convert_ids_to_tokens(prefix_token_ids) eos = self.original_tokenizer.eos_token eos_token_id = self.original_tokenizer.eos_token_id prefix_template = " ".join([f"{token}:0" for token in prefixes]) tokenizer.post_processor = processors.TemplateProcessing( single=f"{prefix_template} $A:0 {eos}:0", pair=f"{prefix_template} $A:0 $B:1 {eos}:1", special_tokens=[ (eos, eos_token_id), *zip(prefixes, prefix_token_ids), ], ) return tokenizer class BigBirdConverter(SpmConverter): def post_processor(self): return processors.TemplateProcessing( single="[CLS]:0 $A:0 [SEP]:0", pair="[CLS]:0 $A:0 [SEP]:0 $B:1 [SEP]:1", special_tokens=[ ("[CLS]", self.original_tokenizer.convert_tokens_to_ids("[CLS]")), ("[SEP]", self.original_tokenizer.convert_tokens_to_ids("[SEP]")), ], ) class CLIPConverter(Converter): def converted(self) -> Tokenizer: vocab = self.original_tokenizer.encoder merges = list(self.original_tokenizer.bpe_ranks.keys()) unk_token = self.original_tokenizer.unk_token tokenizer = Tokenizer( BPE( vocab=vocab, merges=merges, dropout=None, continuing_subword_prefix="", end_of_word_suffix="</w>", fuse_unk=False, unk_token=str(unk_token), ) ) tokenizer.normalizer = normalizers.Sequence( [normalizers.NFC(), normalizers.Replace(Regex(r"\s+"), " "), normalizers.Lowercase()] ) tokenizer.pre_tokenizer = pre_tokenizers.Sequence( [ pre_tokenizers.Split( Regex(r"""'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+"""), behavior="removed", invert=True, ), pre_tokenizers.ByteLevel(add_prefix_space=False), ] ) tokenizer.decoder = decoders.ByteLevel() # Hack to have a ByteLevel and TemplaceProcessor tokenizer.post_processor = processors.RobertaProcessing( sep=(self.original_tokenizer.eos_token, self.original_tokenizer.eos_token_id), cls=(self.original_tokenizer.bos_token, self.original_tokenizer.bos_token_id), add_prefix_space=False, trim_offsets=False, ) return tokenizer class LayoutLMv2Converter(Converter): def converted(self) -> Tokenizer: vocab = self.original_tokenizer.vocab tokenizer = Tokenizer(WordPiece(vocab, unk_token=str(self.original_tokenizer.unk_token))) tokenize_chinese_chars = False strip_accents = False do_lower_case = True if hasattr(self.original_tokenizer, "basic_tokenizer"): tokenize_chinese_chars = self.original_tokenizer.basic_tokenizer.tokenize_chinese_chars strip_accents = self.original_tokenizer.basic_tokenizer.strip_accents do_lower_case = self.original_tokenizer.basic_tokenizer.do_lower_case tokenizer.normalizer = normalizers.BertNormalizer( clean_text=True, handle_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, lowercase=do_lower_case, ) tokenizer.pre_tokenizer = 
pre_tokenizers.BertPreTokenizer() cls = str(self.original_tokenizer.cls_token) sep = str(self.original_tokenizer.sep_token) cls_token_id = self.original_tokenizer.cls_token_id sep_token_id = self.original_tokenizer.sep_token_id tokenizer.post_processor = processors.TemplateProcessing( single=f"{cls}:0 $A:0 {sep}:0", pair=f"{cls}:0 $A:0 {sep}:0 $B:1 {sep}:1", special_tokens=[ (cls, cls_token_id), (sep, sep_token_id), ], ) tokenizer.decoder = decoders.WordPiece(prefix="##") return tokenizer class BlenderbotConverter(Converter): def converted(self) -> Tokenizer: ot = self.original_tokenizer vocab = ot.encoder merges = list(ot.bpe_ranks.keys()) tokenizer = Tokenizer( BPE( vocab=vocab, merges=merges, dropout=None, continuing_subword_prefix="", end_of_word_suffix="", fuse_unk=False, ) ) tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=ot.add_prefix_space) tokenizer.decoder = decoders.ByteLevel() tokenizer.post_processor = processors.TemplateProcessing( single=f"$A:0 {ot.eos_token}:0", special_tokens=[ (ot.eos_token, ot.eos_token_id), ], ) return tokenizer class XGLMConverter(SpmConverter): def vocab(self, proto): vocab = [ ("<s>", 0.0), ("<pad>", 0.0), ("</s>", 0.0), ("<unk>", 0.0), ] vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]] vocab += [("<madeupword0>", 0.0), ("<madeupword1>", 0.0), ("<madeupword2>", 0.0), ("<madeupword3>", 0.0), ("<madeupword4>", 0.0), ("<madeupword5>", 0.0), ("<madeupword6>", 0.0)] # fmt: skip return vocab def unk_id(self, proto): unk_id = 3 return unk_id def post_processor(self): return processors.TemplateProcessing( single="</s> $A", pair="</s> $A </s> </s> $B", special_tokens=[ ("<s>", self.original_tokenizer.convert_tokens_to_ids("<s>")), ("</s>", self.original_tokenizer.convert_tokens_to_ids("</s>")), ], ) class GemmaConverter(SpmConverter): handle_byte_fallback = True SpmExtractor = GemmaSentencePieceExtractor # start and end of turn tokens must be marked as special special_tokens = {"<start_of_turn>", "<end_of_turn>"} """" split_by_unicode_script: true split_by_number: true split_by_whitespace: true treat_whitespace_as_suffix: false allow_whitespace_only_pieces: true split_digits: true byte_fallback: true """ def normalizer(self, proto): return normalizers.Replace(" ", "▁") def vocab(self, proto): vocab = [ (self.original_tokenizer.pad_token, 0.0), (self.original_tokenizer.eos_token, 0.0), (self.original_tokenizer.bos_token, 0.0), ] for piece in proto.pieces[3:]: if piece.piece == "<0x09>": vocab += [("\t", piece.score)] else: vocab += [(piece.piece, piece.score)] # vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]] return vocab def pre_tokenizer(self, replacement, add_prefix_space): return pre_tokenizers.Split(" ", "merged_with_previous") def unk_id(self, proto): unk_id = 3 return unk_id def decoder(self, replacement, add_prefix_space): return decoders.Sequence( [ decoders.Replace("▁", " "), decoders.ByteFallback(), decoders.Fuse(), ] ) class LlamaConverter(SpmConverter): handle_byte_fallback = True def vocab(self, proto): vocab = [ (self.original_tokenizer.convert_ids_to_tokens(0), 0.0), (self.original_tokenizer.convert_ids_to_tokens(1), 0.0), (self.original_tokenizer.convert_ids_to_tokens(2), 0.0), ] vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]] return vocab def unk_id(self, proto): unk_id = 0 return unk_id def decoder(self, replacement, add_prefix_space): sequence = [ decoders.Replace("▁", " "), decoders.ByteFallback(), decoders.Fuse(), ] if add_prefix_space: sequence += 
[decoders.Strip(content=" ", left=1)] return decoders.Sequence(sequence) def normalizer(self, proto): if getattr(self.original_tokenizer, "legacy", True): sequence = [] if getattr(self.original_tokenizer, "add_prefix_space", True): sequence += [normalizers.Prepend(prepend="▁")] sequence += [normalizers.Replace(pattern=" ", content="▁")] return normalizers.Sequence(sequence) return None # non-legacy, no normalizer def pre_tokenizer(self, replacement, add_prefix_space): if not getattr(self.original_tokenizer, "legacy", True): # non-legacy, we need a replace prepend_scheme = _get_prepend_scheme(add_prefix_space, self.original_tokenizer) return pre_tokenizers.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme, split=False) return None def post_processor(self): # the processor is defined in the LlamaTokenizerFast class. return None class MarkupLMConverter(Converter): def converted(self) -> Tokenizer: ot = self.original_tokenizer vocab = ot.encoder merges = list(ot.bpe_ranks.keys()) tokenizer = Tokenizer( BPE( vocab=vocab, merges=merges, dropout=None, continuing_subword_prefix="", end_of_word_suffix="", fuse_unk=False, unk_token=self.original_tokenizer.unk_token, ) ) tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=ot.add_prefix_space) tokenizer.decoder = decoders.ByteLevel() cls = str(self.original_tokenizer.cls_token) sep = str(self.original_tokenizer.sep_token) cls_token_id = self.original_tokenizer.cls_token_id sep_token_id = self.original_tokenizer.sep_token_id tokenizer.post_processor = processors.TemplateProcessing( single=f"{cls} $A {sep}", pair=f"{cls} $A {sep} $B {sep}", special_tokens=[ (cls, cls_token_id), (sep, sep_token_id), ], ) return tokenizer class MoshiConverter(SpmConverter): handle_byte_fallback = True def __init__(self, vocab_file, model_max_length=None, **kwargs): requires_backends(self, "protobuf") Converter.__init__(self, vocab_file) # from .utils import sentencepiece_model_pb2 as model_pb2 model_pb2 = import_protobuf() m = model_pb2.ModelProto() with open(vocab_file, "rb") as f: m.ParseFromString(f.read()) self.proto = m def normalizer(self, proto): precompiled_charsmap = proto.normalizer_spec.precompiled_charsmap _normalizers = [ normalizers.Replace(" ", "▁"), ] if not precompiled_charsmap: return normalizers.Sequence(_normalizers) else: return normalizers.Sequence([normalizers.Precompiled(precompiled_charsmap)] + _normalizers) def decoder(self, replacement, add_prefix_space): sequence = [ decoders.Replace("▁", " "), decoders.ByteFallback(), decoders.Fuse(), ] if add_prefix_space: sequence += [decoders.Strip(content=" ", left=1)] return decoders.Sequence(sequence) def pre_tokenizer(self, replacement, add_prefix_space): prepend_scheme = "first" return pre_tokenizers.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme, split=False) class HeliumConverter(SpmConverter): handle_byte_fallback = True def __init__(self, vocab_file=None, *args): requires_backends(self, "protobuf") Converter.__init__(self, vocab_file) model_pb2 = import_protobuf() m = model_pb2.ModelProto() with open(vocab_file, "rb") as f: m.ParseFromString(f.read()) self.proto = m def tokenizer(self, proto): vocab_scores = self.vocab(proto) tokenizer = Tokenizer( Unigram( vocab_scores, unk_id=self.unk_id(proto), byte_fallback=self.handle_byte_fallback, ) ) # control tokens are special # user defined symbols are not # both user and control tokens are AddedTokens # Add user defined symbols (type == 4) from sentencepiece 
(https://github.com/google/sentencepiece/blob/6225e08edb2577757163b3f5dbba4c0b670ef445/src/sentencepiece_model.proto#L299C29-L299C33) spm_added_tokens = [ (id, p.piece, p.type == 3 or p.piece in self.special_tokens) for id, p in enumerate(proto.pieces) if p.type in [3, 4] ] tokenizer.add_tokens( [ AddedToken(token, normalized=False, special=special, single_word=True) for id, token, special in sorted(spm_added_tokens, key=lambda x: x[0]) ] ) tokenizer.add_tokens([AddedToken("\n", normalized=False, special=False)]) tokenizer.enable_padding(pad_token="<pad>", pad_id=3) return tokenizer def vocab(self, proto): vocab = [] for piece in proto.pieces: if piece.piece == "<0x0A>": vocab += [("\n", piece.score)] else: vocab += [(piece.piece, piece.score)] return vocab def unk_id(self, proto): unk_id = 0 return unk_id def decoder(self, replacement, add_prefix_space): sequence = [ decoders.Replace("▁", " "), decoders.ByteFallback(), decoders.Fuse(), ] sequence += [decoders.Strip(content=" ", left=1)] return decoders.Sequence(sequence) def normalizer(self, proto): return normalizers.Sequence([normalizers.Prepend(" "), normalizers.Replace(r" ", "▁")]) def pre_tokenizer(self, replacement, add_prefix_space): return pre_tokenizers.Sequence([pre_tokenizers.Split("\n", "contiguous")]) def post_processor(self): return processors.TemplateProcessing( single=[ "<s>", "$A", ], pair=[ "<s>", "$A", "<s>", "$B", ], special_tokens=[ ("<s>", 1), ], ) # Copied from transformers.models.gpt2.tokenization_gpt2.bytes_to_unicode def bytes_to_unicode(): """ Returns list of utf-8 byte and a mapping to unicode strings. We specifically avoids mapping to whitespace/control characters the bpe code barfs on. The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup tables between utf-8 bytes and unicode strings. """ bs = ( list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1)) ) cs = bs[:] n = 0 for b in range(2**8): if b not in bs: bs.append(b) cs.append(2**8 + n) n += 1 cs = [chr(n) for n in cs] return dict(zip(bs, cs)) class TikTokenConverter: """ A general tiktoken converter. """ def __init__( self, vocab_file=None, pattern=r"""(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\p{L}\p{N}]?\p{L}+|\p{N}{1,3}| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+""", add_prefix_space=False, additional_special_tokens=None, *args, **kwargs, ): super().__init__(*args) self.vocab_file = vocab_file self.pattern = pattern self.add_prefix_space = add_prefix_space self.additional_special_tokens = additional_special_tokens def extract_vocab_merges_from_model(self, tiktoken_url: str): try: from tiktoken.load import load_tiktoken_bpe except Exception: raise ValueError( "`tiktoken` is required to read a `tiktoken` file. Install it with " "`pip install tiktoken`." 
) bpe_ranks = load_tiktoken_bpe(tiktoken_url) byte_encoder = bytes_to_unicode() def token_bytes_to_string(b): return "".join([byte_encoder[ord(char)] for char in b.decode("latin-1")]) merges = [] vocab = {} for token, rank in bpe_ranks.items(): vocab[token_bytes_to_string(token)] = rank if len(token) == 1: continue local = [] for index in range(1, len(token)): piece_l, piece_r = token[:index], token[index:] if piece_l in bpe_ranks and piece_r in bpe_ranks and (piece_l + piece_r) in bpe_ranks: local.append((piece_l, piece_r, rank)) local = sorted(local, key=lambda x: (bpe_ranks[x[0]], bpe_ranks[x[1]]), reverse=False) merges.extend(local) merges = sorted(merges, key=lambda val: val[2], reverse=False) merges = [(token_bytes_to_string(val[0]), token_bytes_to_string(val[1])) for val in merges] return vocab, merges def tokenizer(self): vocab_scores, merges = self.extract_vocab_merges_from_model(self.vocab_file) tokenizer = Tokenizer(BPE(vocab_scores, merges, fuse_unk=False)) if hasattr(tokenizer.model, "ignore_merges"): tokenizer.model.ignore_merges = True return tokenizer def converted(self) -> Tokenizer: tokenizer = self.tokenizer() tokenizer.pre_tokenizer = pre_tokenizers.Sequence( [ pre_tokenizers.Split(Regex(self.pattern), behavior="isolated", invert=False), pre_tokenizers.ByteLevel(add_prefix_space=self.add_prefix_space, use_regex=False), ] ) tokenizer.decoder = decoders.ByteLevel() tokenizer.add_special_tokens(self.additional_special_tokens) tokenizer.post_processor = processors.ByteLevel(trim_offsets=False) return tokenizer SLOW_TO_FAST_CONVERTERS = { "AlbertTokenizer": AlbertConverter, "BartTokenizer": RobertaConverter, "BarthezTokenizer": BarthezConverter, "BertTokenizer": BertConverter, "BigBirdTokenizer": BigBirdConverter, "BlenderbotTokenizer": BlenderbotConverter, "CamembertTokenizer": CamembertConverter, "CLIPTokenizer": CLIPConverter, "CodeGenTokenizer": GPT2Converter, "ConvBertTokenizer": BertConverter, "DebertaTokenizer": DebertaConverter, "DebertaV2Tokenizer": DebertaV2Converter, "DistilBertTokenizer": BertConverter, "DPRReaderTokenizer": BertConverter, "DPRQuestionEncoderTokenizer": BertConverter, "DPRContextEncoderTokenizer": BertConverter, "ElectraTokenizer": BertConverter, "FNetTokenizer": AlbertConverter, "FunnelTokenizer": FunnelConverter, "GPT2Tokenizer": GPT2Converter, "HerbertTokenizer": HerbertConverter, "LayoutLMTokenizer": BertConverter, "LayoutLMv2Tokenizer": BertConverter, "LayoutLMv3Tokenizer": RobertaConverter, "LayoutXLMTokenizer": XLMRobertaConverter, "LongformerTokenizer": RobertaConverter, "LEDTokenizer": RobertaConverter, "LxmertTokenizer": BertConverter, "MarkupLMTokenizer": MarkupLMConverter, "MBartTokenizer": MBartConverter, "MBart50Tokenizer": MBart50Converter, "MPNetTokenizer": MPNetConverter, "MobileBertTokenizer": BertConverter, "MvpTokenizer": RobertaConverter, "NllbTokenizer": NllbConverter, "OpenAIGPTTokenizer": OpenAIGPTConverter, "PegasusTokenizer": PegasusConverter, "Qwen2Tokenizer": Qwen2Converter, "RealmTokenizer": BertConverter, "ReformerTokenizer": ReformerConverter, "RemBertTokenizer": RemBertConverter, "RetriBertTokenizer": BertConverter, "RobertaTokenizer": RobertaConverter, "RoFormerTokenizer": RoFormerConverter, "SeamlessM4TTokenizer": SeamlessM4TConverter, "SqueezeBertTokenizer": BertConverter, "T5Tokenizer": T5Converter, "UdopTokenizer": UdopConverter, "WhisperTokenizer": WhisperConverter, "XLMRobertaTokenizer": XLMRobertaConverter, "XLNetTokenizer": XLNetConverter, "SplinterTokenizer": SplinterConverter, "XGLMTokenizer": 
XGLMConverter, "LlamaTokenizer": LlamaConverter, "CodeLlamaTokenizer": LlamaConverter, "GemmaTokenizer": GemmaConverter, "Phi3Tokenizer": LlamaConverter, } def convert_slow_tokenizer(transformer_tokenizer, from_tiktoken=False) -> Tokenizer: """ Utilities to convert a slow tokenizer instance in a fast tokenizer instance. Args: transformer_tokenizer ([`~tokenization_utils_base.PreTrainedTokenizer`]): Instance of a slow tokenizer to convert in the backend tokenizer for [`~tokenization_utils_base.PreTrainedTokenizerFast`]. from_tiktoken (bool, optional): Whether to use the `tiktoken` library to convert the tokenizer instead of sentencepiece. Defaults to False. Return: A instance of [`~tokenizers.Tokenizer`] to be used as the backend tokenizer of a [`~tokenization_utils_base.PreTrainedTokenizerFast`] """ tokenizer_class_name = transformer_tokenizer.__class__.__name__ if tokenizer_class_name in SLOW_TO_FAST_CONVERTERS and not from_tiktoken: converter_class = SLOW_TO_FAST_CONVERTERS[tokenizer_class_name] return converter_class(transformer_tokenizer).converted() else: try: logger.info("Converting from Tiktoken") return TikTokenConverter( vocab_file=transformer_tokenizer.vocab_file, additional_special_tokens=transformer_tokenizer.additional_special_tokens, ).converted() except Exception: raise ValueError( f"Converting from Tiktoken failed, if a converter for SentencePiece is available, provide a model path " f"with a SentencePiece tokenizer.model file." f"Currently available slow->fast convertors: {list(SLOW_TO_FAST_CONVERTERS.keys())}" )
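

# Minimal usage sketch for `convert_slow_tokenizer`. The checkpoint name below is
# purely illustrative -- any slow tokenizer whose class appears in
# SLOW_TO_FAST_CONVERTERS (and whose backend dependencies, e.g. sentencepiece and
# protobuf, are installed) should behave the same way.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    slow_tokenizer = AutoTokenizer.from_pretrained("albert/albert-base-v2", use_fast=False)
    fast_backend = convert_slow_tokenizer(slow_tokenizer)  # returns a `tokenizers.Tokenizer`
    print(fast_backend.encode("Hello world").tokens)
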
transformers/src/transformers/convert_slow_tokenizer.py/0
{ "file_path": "transformers/src/transformers/convert_slow_tokenizer.py", "repo_id": "transformers", "token_count": 30672 }
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import collections from .utils import ExplicitEnum, is_torch_available, logging if is_torch_available(): import torch logger = logging.get_logger(__name__) class DebugUnderflowOverflow: """ This debug class helps detect and understand where the model starts getting very large or very small, and more importantly `nan` or `inf` weight and activation elements. There are 2 working modes: 1. Underflow/overflow detection (default) 2. Specific batch absolute min/max tracing without detection Mode 1: Underflow/overflow detection To activate the underflow/overflow detection, initialize the object with the model : ```python debug_overflow = DebugUnderflowOverflow(model) ``` then run the training as normal and if `nan` or `inf` gets detected in at least one of the weight, input or output elements this module will throw an exception and will print `max_frames_to_save` frames that lead to this event, each frame reporting 1. the fully qualified module name plus the class name whose `forward` was run 2. the absolute min and max value of all elements for each module weights, and the inputs and output For example, here is the header and the last few frames in detection report for `google/mt5-small` run in fp16 mixed precision : ``` Detected inf/nan during batch_number=0 Last 21 forward frames: abs min abs max metadata [...] encoder.block.2.layer.1.DenseReluDense.wi_0 Linear 2.17e-07 4.50e+00 weight 1.79e-06 4.65e+00 input[0] 2.68e-06 3.70e+01 output encoder.block.2.layer.1.DenseReluDense.wi_1 Linear 8.08e-07 2.66e+01 weight 1.79e-06 4.65e+00 input[0] 1.27e-04 2.37e+02 output encoder.block.2.layer.1.DenseReluDense.wo Linear 1.01e-06 6.44e+00 weight 0.00e+00 9.74e+03 input[0] 3.18e-04 6.27e+04 output encoder.block.2.layer.1.DenseReluDense T5DenseGatedGeluDense 1.79e-06 4.65e+00 input[0] 3.18e-04 6.27e+04 output encoder.block.2.layer.1.dropout Dropout 3.18e-04 6.27e+04 input[0] 0.00e+00 inf output ``` You can see here, that `T5DenseGatedGeluDense.forward` resulted in output activations, whose absolute max value was around 62.7K, which is very close to fp16's top limit of 64K. In the next frame we have `Dropout` which renormalizes the weights, after it zeroed some of the elements, which pushes the absolute max value to more than 64K, and we get an overlow. As you can see it's the previous frames that we need to look into when the numbers start going into very large for fp16 numbers. The tracking is done in a forward hook, which gets invoked immediately after `forward` has completed. By default the last 21 frames are printed. You can change the default to adjust for your needs. For example : ```python debug_overflow = DebugUnderflowOverflow(model, max_frames_to_save=100) ``` To validate that you have set up this debugging feature correctly, and you intend to use it in a training that may take hours to complete, first run it with normal tracing enabled for one of a few batches as explained in the next section. Mode 2. 
Specific batch absolute min/max tracing without detection The second work mode is per-batch tracing with the underflow/overflow detection feature turned off. Let's say you want to watch the absolute min and max values for all the ingredients of each `forward` call of a given batch, and only do that for batches 1 and 3. Then you instantiate this class as : ```python debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1, 3]) ``` And now full batches 1 and 3 will be traced using the same format as explained above. Batches are 0-indexed. This is helpful if you know that the program starts misbehaving after a certain batch number, so you can fast-forward right to that area. Early stopping: You can also specify the batch number after which to stop the training, with : ```python debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1, 3], abort_after_batch_num=3) ``` This feature is mainly useful in the tracing mode, but you can use it for any mode. **Performance**: As this module measures absolute `min`/``max` of each weight of the model on every forward it'll slow the training down. Therefore remember to turn it off once the debugging needs have been met. Args: model (`nn.Module`): The model to debug. max_frames_to_save (`int`, *optional*, defaults to 21): How many frames back to record trace_batch_nums(`List[int]`, *optional*, defaults to `[]`): Which batch numbers to trace (turns detection off) abort_after_batch_num (`int``, *optional*): Whether to abort after a certain batch number has finished """ def __init__(self, model, max_frames_to_save=21, trace_batch_nums=[], abort_after_batch_num=None): self.model = model self.trace_batch_nums = trace_batch_nums self.abort_after_batch_num = abort_after_batch_num # keep a LIFO buffer of frames to dump as soon as inf/nan is encountered to give context to the problem emergence self.frames = collections.deque([], max_frames_to_save) self.frame = [] self.batch_number = 0 self.total_calls = 0 self.detected_overflow = False self.prefix = " " self.analyse_model() self.register_forward_hook() def save_frame(self, frame=None): if frame is not None: self.expand_frame(frame) self.frames.append("\n".join(self.frame)) self.frame = [] # start a new frame def expand_frame(self, line): self.frame.append(line) def trace_frames(self): print("\n".join(self.frames)) self.frames = [] def reset_saved_frames(self): self.frames = [] def dump_saved_frames(self): print(f"\nDetected inf/nan during batch_number={self.batch_number}") print(f"Last {len(self.frames)} forward frames:") print(f"{'abs min':8} {'abs max':8} metadata") print("\n".join(self.frames)) print("\n\n") self.frames = [] def analyse_model(self): # extract the fully qualified module names, to be able to report at run time. 
e.g.: # encoder.block.2.layer.0.SelfAttention.o # # for shared weights only the first shared module name will be registered self.module_names = {m: name for name, m in self.model.named_modules()} # self.longest_module_name = max(len(v) for v in self.module_names.values()) def analyse_variable(self, var, ctx): if torch.is_tensor(var): self.expand_frame(get_abs_min_max(var, ctx)) if detect_overflow(var, ctx): self.detected_overflow = True elif var is None: self.expand_frame(f"{'None':>17} {ctx}") else: self.expand_frame(f"{'not a tensor':>17} {ctx}") def batch_start_frame(self): self.expand_frame(f"\n\n{self.prefix} *** Starting batch number={self.batch_number} ***") self.expand_frame(f"{'abs min':8} {'abs max':8} metadata") def batch_end_frame(self): self.expand_frame(f"{self.prefix} *** Finished batch number={self.batch_number-1} ***\n\n") def create_frame(self, module, input, output): self.expand_frame(f"{self.prefix} {self.module_names[module]} {module.__class__.__name__}") # params for name, p in module.named_parameters(recurse=False): self.analyse_variable(p, name) # inputs if isinstance(input, tuple): for i, x in enumerate(input): self.analyse_variable(x, f"input[{i}]") else: self.analyse_variable(input, "input") # outputs if isinstance(output, tuple): for i, x in enumerate(output): # possibly a tuple of tuples if isinstance(x, tuple): for j, y in enumerate(x): self.analyse_variable(y, f"output[{i}][{j}]") else: self.analyse_variable(x, f"output[{i}]") else: self.analyse_variable(output, "output") self.save_frame() def register_forward_hook(self): self.model.apply(self._register_forward_hook) def _register_forward_hook(self, module): module.register_forward_hook(self.forward_hook) def forward_hook(self, module, input, output): # - input is a tuple of packed inputs (could be non-Tensors) # - output could be a Tensor or a tuple of Tensors and non-Tensors last_frame_of_batch = False trace_mode = True if self.batch_number in self.trace_batch_nums else False if trace_mode: self.reset_saved_frames() if self.total_calls == 0: self.batch_start_frame() self.total_calls += 1 # count batch numbers - the very first forward hook of the batch will be called when the # batch completes - i.e. it gets called very last - we know this batch has finished if module == self.model: self.batch_number += 1 last_frame_of_batch = True self.create_frame(module, input, output) # if last_frame_of_batch: # self.batch_end_frame() if trace_mode: self.trace_frames() if last_frame_of_batch: self.batch_start_frame() if self.detected_overflow and not trace_mode: self.dump_saved_frames() # now we can abort, as it's pointless to continue running raise ValueError( "DebugUnderflowOverflow: inf/nan detected, aborting as there is no point running further. " "Please scroll up above this traceback to see the activation values prior to this event." ) # abort after certain batch if requested to do so if self.abort_after_batch_num is not None and self.batch_number > self.abort_after_batch_num: raise ValueError( f"DebugUnderflowOverflow: aborting after {self.batch_number} batches due to" f" `abort_after_batch_num={self.abort_after_batch_num}` arg" ) def get_abs_min_max(var, ctx): abs_var = var.abs() return f"{abs_var.min():8.2e} {abs_var.max():8.2e} {ctx}" def detect_overflow(var, ctx): """ Report whether the tensor contains any `nan` or `inf` entries. This is useful for detecting overflows/underflows and best to call right after the function that did some math that modified the tensor in question. 
This function contains a few other helper features that you can enable and tweak directly if you want to track various other things. Args: var: the tensor variable to check ctx: the message to print as a context Return: `True` if `inf` or `nan` was detected, `False` otherwise """ detected = False if torch.isnan(var).any().item(): detected = True print(f"{ctx} has nans") if torch.isinf(var).any().item(): detected = True print(f"{ctx} has infs") # if needed to monitor large elements can enable the following if 0: # and detected: n100 = var[torch.ge(var.abs(), 100)] if n100.numel() > 0: print(f"{ctx}: n100={n100.numel()}") n1000 = var[torch.ge(var.abs(), 1000)] if n1000.numel() > 0: print(f"{ctx}: n1000={n1000.numel()}") n10000 = var[torch.ge(var.abs(), 10000)] if n10000.numel() > 0: print(f"{ctx}: n10000={n10000.numel()}") if 0: print(f"min={var.min():9.2e} max={var.max():9.2e}") if 0: print(f"min={var.min():9.2e} max={var.max():9.2e} var={var.var():9.2e} mean={var.mean():9.2e} ({ctx})") return detected class DebugOption(ExplicitEnum): UNDERFLOW_OVERFLOW = "underflow_overflow" TPU_METRICS_DEBUG = "tpu_metrics_debug"
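

# Minimal usage sketch for `DebugUnderflowOverflow` (illustrative only; the function
# is never called on import). The toy model is an assumption made for this example --
# in real use you would wrap your actual model and then run your normal training
# loop, as described in the class docstring above.
def _debug_underflow_overflow_example():
    if not is_torch_available():
        return
    from torch import nn

    model = nn.Sequential(nn.Linear(8, 8), nn.ReLU(), nn.Linear(8, 2))
    # Trace batches 0 and 1 instead of detecting overflow; frames are printed to stdout.
    debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[0, 1])
    for _ in range(2):
        model(torch.randn(4, 8))
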
transformers/src/transformers/debug_utils.py/0
{ "file_path": "transformers/src/transformers/debug_utils.py", "repo_id": "transformers", "token_count": 5154 }
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import asyncio from queue import Queue from typing import TYPE_CHECKING, Optional if TYPE_CHECKING: from ..models.auto import AutoTokenizer class BaseStreamer: """ Base class from which `.generate()` streamers should inherit. """ def put(self, value): """Function that is called by `.generate()` to push new tokens""" raise NotImplementedError() def end(self): """Function that is called by `.generate()` to signal the end of generation""" raise NotImplementedError() class TextStreamer(BaseStreamer): """ Simple text streamer that prints the token(s) to stdout as soon as entire words are formed. <Tip warning={true}> The API for the streamer classes is still under development and may change in the future. </Tip> Parameters: tokenizer (`AutoTokenizer`): The tokenized used to decode the tokens. skip_prompt (`bool`, *optional*, defaults to `False`): Whether to skip the prompt to `.generate()` or not. Useful e.g. for chatbots. decode_kwargs (`dict`, *optional*): Additional keyword arguments to pass to the tokenizer's `decode` method. Examples: ```python >>> from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer >>> tok = AutoTokenizer.from_pretrained("openai-community/gpt2") >>> model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2") >>> inputs = tok(["An increasing sequence: one,"], return_tensors="pt") >>> streamer = TextStreamer(tok) >>> # Despite returning the usual output, the streamer will also print the generated text to stdout. >>> _ = model.generate(**inputs, streamer=streamer, max_new_tokens=20) An increasing sequence: one, two, three, four, five, six, seven, eight, nine, ten, eleven, ``` """ def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs): self.tokenizer = tokenizer self.skip_prompt = skip_prompt self.decode_kwargs = decode_kwargs # variables used in the streaming process self.token_cache = [] self.print_len = 0 self.next_tokens_are_prompt = True def put(self, value): """ Receives tokens, decodes them, and prints them to stdout as soon as they form entire words. """ if len(value.shape) > 1 and value.shape[0] > 1: raise ValueError("TextStreamer only supports batch size 1") elif len(value.shape) > 1: value = value[0] if self.skip_prompt and self.next_tokens_are_prompt: self.next_tokens_are_prompt = False return # Add the new token to the cache and decodes the entire thing. self.token_cache.extend(value.tolist()) text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs) # After the symbol for a new line, we flush the cache. if text.endswith("\n"): printable_text = text[self.print_len :] self.token_cache = [] self.print_len = 0 # If the last token is a CJK character, we print the characters. 
elif len(text) > 0 and self._is_chinese_char(ord(text[-1])): printable_text = text[self.print_len :] self.print_len += len(printable_text) # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words, # which may change with the subsequent token -- there are probably smarter ways to do this!) else: printable_text = text[self.print_len : text.rfind(" ") + 1] self.print_len += len(printable_text) self.on_finalized_text(printable_text) def end(self): """Flushes any remaining cache and prints a newline to stdout.""" # Flush the cache, if it exists if len(self.token_cache) > 0: text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs) printable_text = text[self.print_len :] self.token_cache = [] self.print_len = 0 else: printable_text = "" self.next_tokens_are_prompt = True self.on_finalized_text(printable_text, stream_end=True) def on_finalized_text(self, text: str, stream_end: bool = False): """Prints the new text to stdout. If the stream is ending, also prints a newline.""" print(text, flush=True, end="" if not stream_end else None) def _is_chinese_char(self, cp): """Checks whether CP is the codepoint of a CJK character.""" # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ( (cp >= 0x4E00 and cp <= 0x9FFF) or (cp >= 0x3400 and cp <= 0x4DBF) # or (cp >= 0x20000 and cp <= 0x2A6DF) # or (cp >= 0x2A700 and cp <= 0x2B73F) # or (cp >= 0x2B740 and cp <= 0x2B81F) # or (cp >= 0x2B820 and cp <= 0x2CEAF) # or (cp >= 0xF900 and cp <= 0xFAFF) or (cp >= 0x2F800 and cp <= 0x2FA1F) # ): # return True return False class TextIteratorStreamer(TextStreamer): """ Streamer that stores print-ready text in a queue, to be used by a downstream application as an iterator. This is useful for applications that benefit from acessing the generated text in a non-blocking way (e.g. in an interactive Gradio demo). <Tip warning={true}> The API for the streamer classes is still under development and may change in the future. </Tip> Parameters: tokenizer (`AutoTokenizer`): The tokenized used to decode the tokens. skip_prompt (`bool`, *optional*, defaults to `False`): Whether to skip the prompt to `.generate()` or not. Useful e.g. for chatbots. timeout (`float`, *optional*): The timeout for the text queue. If `None`, the queue will block indefinitely. Useful to handle exceptions in `.generate()`, when it is called in a separate thread. decode_kwargs (`dict`, *optional*): Additional keyword arguments to pass to the tokenizer's `decode` method. Examples: ```python >>> from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer >>> from threading import Thread >>> tok = AutoTokenizer.from_pretrained("openai-community/gpt2") >>> model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2") >>> inputs = tok(["An increasing sequence: one,"], return_tensors="pt") >>> streamer = TextIteratorStreamer(tok) >>> # Run the generation in a separate thread, so that we can fetch the generated text in a non-blocking way. 
>>> generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=20) >>> thread = Thread(target=model.generate, kwargs=generation_kwargs) >>> thread.start() >>> generated_text = "" >>> for new_text in streamer: ... generated_text += new_text >>> generated_text 'An increasing sequence: one, two, three, four, five, six, seven, eight, nine, ten, eleven,' ``` """ def __init__( self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: Optional[float] = None, **decode_kwargs ): super().__init__(tokenizer, skip_prompt, **decode_kwargs) self.text_queue = Queue() self.stop_signal = None self.timeout = timeout def on_finalized_text(self, text: str, stream_end: bool = False): """Put the new text in the queue. If the stream is ending, also put a stop signal in the queue.""" self.text_queue.put(text, timeout=self.timeout) if stream_end: self.text_queue.put(self.stop_signal, timeout=self.timeout) def __iter__(self): return self def __next__(self): value = self.text_queue.get(timeout=self.timeout) if value == self.stop_signal: raise StopIteration() else: return value class AsyncTextIteratorStreamer(TextStreamer): """ Streamer that stores print-ready text in a queue, to be used by a downstream application as an async iterator. This is useful for applications that benefit from acessing the generated text asynchronously (e.g. in an interactive Gradio demo). <Tip warning={true}> The API for the streamer classes is still under development and may change in the future. </Tip> Parameters: tokenizer (`AutoTokenizer`): The tokenized used to decode the tokens. skip_prompt (`bool`, *optional*, defaults to `False`): Whether to skip the prompt to `.generate()` or not. Useful e.g. for chatbots. timeout (`float`, *optional*): The timeout for the text queue. If `None`, the queue will block indefinitely. Useful to handle exceptions in `.generate()`, when it is called in a separate thread. decode_kwargs (`dict`, *optional*): Additional keyword arguments to pass to the tokenizer's `decode` method. Raises: TimeoutError: If token generation time exceeds timeout value. Examples: ```python >>> from transformers import AutoModelForCausalLM, AutoTokenizer, AsyncTextIteratorStreamer >>> from threading import Thread >>> import asyncio >>> tok = AutoTokenizer.from_pretrained("openai-community/gpt2") >>> model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2") >>> inputs = tok(["An increasing sequence: one,"], return_tensors="pt") >>> # Run the generation in a separate thread, so that we can fetch the generated text in a non-blocking way. >>> async def main(): ... # Important: AsyncTextIteratorStreamer must be initialized inside a coroutine! ... streamer = AsyncTextIteratorStreamer(tok) ... generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=20) ... thread = Thread(target=model.generate, kwargs=generation_kwargs) ... thread.start() ... generated_text = "" ... async for new_text in streamer: ... 
generated_text += new_text >>> print(generated_text) >>> asyncio.run(main()) An increasing sequence: one, two, three, four, five, six, seven, eight, nine, ten, eleven, ``` """ def __init__( self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: Optional[float] = None, **decode_kwargs ): super().__init__(tokenizer, skip_prompt, **decode_kwargs) self.text_queue = asyncio.Queue() self.stop_signal = None self.timeout = timeout self.loop = asyncio.get_running_loop() self.has_asyncio_timeout = hasattr(asyncio, "timeout") def on_finalized_text(self, text: str, stream_end: bool = False): """Put the new text in the queue. If the stream is ending, also put a stop signal in the queue.""" self.loop.call_soon_threadsafe(self.text_queue.put_nowait, text) if stream_end: self.loop.call_soon_threadsafe(self.text_queue.put_nowait, self.stop_signal) def __aiter__(self): return self async def __anext__(self): try: if self.has_asyncio_timeout: async with asyncio.timeout(self.timeout): value = await self.text_queue.get() else: value = await asyncio.wait_for(self.text_queue.get(), timeout=self.timeout) except asyncio.TimeoutError: raise TimeoutError() else: if value == self.stop_signal: raise StopAsyncIteration() else: return value
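

# Minimal sketch of a custom streamer built on `BaseStreamer`. This class is an
# illustration only (it is not part of the public API): it counts the tokens pushed
# by `.generate()` and records when generation has finished, e.g.
# `model.generate(**inputs, streamer=_TokenCountingStreamer())`.
class _TokenCountingStreamer(BaseStreamer):
    def __init__(self):
        self.num_tokens = 0
        self.finished = False

    def put(self, value):
        # `value` is a tensor of token ids; the first call may contain the whole
        # prompt, later calls typically contain the newly generated token(s).
        self.num_tokens += value.numel()

    def end(self):
        self.finished = True
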
transformers/src/transformers/generation/streamers.py/0
{ "file_path": "transformers/src/transformers/generation/streamers.py", "repo_id": "transformers", "token_count": 5143 }