import { VisionEncoderDecoderModel, full } from "../../../src/transformers.js";

import { MAX_MODEL_LOAD_TIME, MAX_TEST_EXECUTION_TIME, MAX_MODEL_DISPOSE_TIME, DEFAULT_MODEL_OPTIONS } from "../../init.js";

export default () => {
  describe("VisionEncoderDecoderModel", () => {
    const model_id = "hf-internal-testing/tiny-random-VisionEncoderDecoderModel-vit-gpt2";

    /** @type {VisionEncoderDecoderModel} */
    let model;
    beforeAll(async () => {
      model = await VisionEncoderDecoderModel.from_pretrained(model_id, DEFAULT_MODEL_OPTIONS);
    }, MAX_MODEL_LOAD_TIME);

    it(
      "batch_size=1",
      async () => {
        const outputs = await model.generate({
          pixel_values: full([1, 3, 30, 30], -1.0),
          max_length: 5,
        });
        expect(outputs.tolist()).toEqual([[0n, 400n, 400n, 400n, 400n]]);
      },
      MAX_TEST_EXECUTION_TIME,
    );

    // TODO: Add back
    // it('batch_size>1', async () => {
    //   const outputs = await model.generate({
    //     pixel_values: cat([
    //       full([1, 3, 30, 30], -1.0),
    //       full([1, 3, 30, 30], 0.0),
    //     ]),
    //     max_length: 5,
    //   });
    //   expect(outputs.tolist()).toEqual([
    //     // Generation continues
    //     [0n, 400n, 400n, 400n, 400n],
    //     // Finishes early. 1023 is the padding token
    //     [0n, 0n, 1023n, 1023n, 1023n],
    //   ]);
    // }, MAX_TEST_EXECUTION_TIME);

    afterAll(async () => {
      await model?.dispose();
    }, MAX_MODEL_DISPOSE_TIME);
  });
};
transformers.js/tests/models/vision_encoder_decoder/test_modeling_vision_encoder_decoder.js/0
{ "file_path": "transformers.js/tests/models/vision_encoder_decoder/test_modeling_vision_encoder_decoder.js", "repo_id": "transformers.js", "token_count": 752 }
import { pipeline, DocumentQuestionAnsweringPipeline, RawImage } from "../../src/transformers.js";

import { MAX_MODEL_LOAD_TIME, MAX_TEST_EXECUTION_TIME, MAX_MODEL_DISPOSE_TIME, DEFAULT_MODEL_OPTIONS } from "../init.js";

const PIPELINE_ID = "document-question-answering";

export default () => {
  describe("Document Question Answering", () => {
    const model_id = "hf-internal-testing/tiny-random-VisionEncoderDecoderModel-donutswin-mbart";

    /** @type {DocumentQuestionAnsweringPipeline} */
    let pipe;
    beforeAll(async () => {
      pipe = await pipeline(PIPELINE_ID, model_id, DEFAULT_MODEL_OPTIONS);
    }, MAX_MODEL_LOAD_TIME);

    it("should be an instance of DocumentQuestionAnsweringPipeline", () => {
      expect(pipe).toBeInstanceOf(DocumentQuestionAnsweringPipeline);
    });

    describe("batch_size=1", () => {
      it(
        "default",
        async () => {
          const dims = [64, 32, 3];
          const image = new RawImage(new Uint8ClampedArray(dims[0] * dims[1] * dims[2]).fill(255), ...dims);
          const question = "What is the invoice number?";
          const output = await pipe(image, question);

          const target = [{ answer: null }];
          expect(output).toEqual(target);
        },
        MAX_TEST_EXECUTION_TIME,
      );
    });

    afterAll(async () => {
      await pipe.dispose();
    }, MAX_MODEL_DISPOSE_TIME);
  });
};
transformers.js/tests/pipelines/test_pipelines_document_question_answering.js/0
{ "file_path": "transformers.js/tests/pipelines/test_pipelines_document_question_answering.js", "repo_id": "transformers.js", "token_count": 571 }
import { pipeline, TranslationPipeline } from "../../src/transformers.js";

import { MAX_MODEL_LOAD_TIME, MAX_TEST_EXECUTION_TIME, MAX_MODEL_DISPOSE_TIME, DEFAULT_MODEL_OPTIONS } from "../init.js";

const PIPELINE_ID = "translation";

export default () => {
  describe("Translation", () => {
    const model_id = "Xenova/tiny-random-M2M100ForConditionalGeneration";

    /** @type {TranslationPipeline} */
    let pipe;
    beforeAll(async () => {
      pipe = await pipeline(PIPELINE_ID, model_id, DEFAULT_MODEL_OPTIONS);
    }, MAX_MODEL_LOAD_TIME);

    it("should be an instance of TranslationPipeline", () => {
      expect(pipe).toBeInstanceOf(TranslationPipeline);
    });

    describe("batch_size=1", () => {
      it(
        "default",
        async () => {
          const text = "जीवन एक चॉकलेट बॉक्स की तरह है।";
          const output = await pipe(text, {
            src_lang: "hi",
            tgt_lang: "fr",
            max_new_tokens: 5,
          });
          const target = [{ translation_text: "Slovenska төсли төсли төсли" }];
          expect(output).toEqual(target);
        },
        MAX_TEST_EXECUTION_TIME,
      );
    });

    afterAll(async () => {
      await pipe.dispose();
    }, MAX_MODEL_DISPOSE_TIME);
  });
};
transformers.js/tests/pipelines/test_pipelines_translation.js/0
{ "file_path": "transformers.js/tests/pipelines/test_pipelines_translation.js", "repo_id": "transformers.js", "token_count": 607 }
import { AutoProcessor, hamming, hanning, mel_filter_bank } from "../../src/transformers.js";
import { getFile } from "../../src/utils/hub.js";
import { RawImage } from "../../src/utils/image.js";

import { MAX_TEST_EXECUTION_TIME } from "../init.js";
import { compare } from "../test_utils.js";

describe("Utilities", () => {
  describe("Audio utilities", () => {
    it(
      "should calculate MEL filters",
      async () => {
        // NOTE: Uses official HF implementation as reference:
        const processor = await AutoProcessor.from_pretrained("openai/whisper-tiny.en");
        const config = processor.feature_extractor.config;

        // True MEL filters
        const original_mel_filters = config.mel_filters;

        // Calculated MEL filters
        const calculated_mel_filters = mel_filter_bank(
          Math.floor(1 + config.n_fft / 2), // num_frequency_bins
          config.feature_size, // num_mel_filters
          0.0, // min_frequency
          8000.0, // max_frequency
          config.sampling_rate, // sampling_rate
          "slaney", // norm
          "slaney", // mel_scale
        );

        const original = original_mel_filters.flat();
        const calculated = calculated_mel_filters.flat();

        // Compute max difference
        const maxdiff = original.reduce((maxdiff, _, i) => {
          const diff = Math.abs(original[i] - calculated[i]);
          return Math.max(maxdiff, diff);
        }, -Infinity);

        expect(maxdiff).toBeGreaterThanOrEqual(0);
        expect(maxdiff).toBeLessThan(1e-6);
      },
      MAX_TEST_EXECUTION_TIME,
    );

    it(
      "should calculate window",
      async () => {
        compare(hanning(10), new Float64Array([0.0, 0.11697777844051105, 0.41317591116653485, 0.75, 0.9698463103929542, 0.9698463103929542, 0.75, 0.41317591116653485, 0.11697777844051105, 0.0]));
        compare(hamming(10), new Float64Array([0.08000000000000002, 0.1876195561652702, 0.46012183827321207, 0.7700000000000001, 0.9722586055615179, 0.9722586055615179, 0.7700000000000001, 0.46012183827321207, 0.1876195561652702, 0.08000000000000002]));
      },
      MAX_TEST_EXECUTION_TIME,
    );
  });

  describe("Hub utilities", () => {
    it("Read data from blob", async () => {
      const blob = new Blob(["Hello, world!"], { type: "text/plain" });
      const blobUrl = URL.createObjectURL(blob);
      const data = await getFile(blobUrl);
      expect(await data.text()).toBe("Hello, world!");
    });
  });

  describe("Image utilities", () => {
    const [width, height, channels] = [2, 2, 3];
    const data = Uint8Array.from({ length: width * height * channels }, (_, i) => i % 5);
    const tiny_image = new RawImage(data, width, height, channels);

    let image;
    beforeAll(async () => {
      image = await RawImage.fromURL("https://picsum.photos/300/200");
    });

    it("Can split image into separate channels", async () => {
      const image_data = tiny_image.split().map((x) => x.data);

      const target = [
        new Uint8Array([0, 3, 1, 4]), // Reds
        new Uint8Array([1, 4, 2, 0]), // Greens
        new Uint8Array([2, 0, 3, 1]), // Blues
      ];

      compare(image_data, target);
    });

    it("Can split channels for grayscale", async () => {
      const image_data = tiny_image
        .grayscale()
        .split()
        .map((x) => x.data);
      const target = [new Uint8Array([1, 3, 2, 1])];

      compare(image_data, target);
    });

    it("Read image from URL", async () => {
      expect(image.width).toBe(300);
      expect(image.height).toBe(200);
      expect(image.channels).toBe(3);
    });

    it("Can resize image", async () => {
      const resized = await image.resize(150, 100);
      expect(resized.width).toBe(150);
      expect(resized.height).toBe(100);
    });

    it("Can resize with aspect ratio", async () => {
      const resized = await image.resize(150, null);
      expect(resized.width).toBe(150);
      expect(resized.height).toBe(100);
    });

    it("Returns original image if width and height are null", async () => {
      const resized = await image.resize(null, null);
      expect(resized.width).toBe(300);
      expect(resized.height).toBe(200);
    });
  });
});
transformers.js/tests/utils/utils.test.js/0
{ "file_path": "transformers.js/tests/utils/utils.test.js", "repo_id": "transformers.js", "token_count": 1769 }
defaults:
  - benchmark # inheriting benchmark schema
  - scenario: inference
  - launcher: process
  - backend: pytorch
  - _self_ # for hydra 1.1 compatibility

name: pytorch_generate

launcher:
  start_method: spawn
  device_isolation: true
  device_isolation_action: warn

backend:
  device: cuda
  device_ids: 0
  no_weights: true
  model: meta-llama/Llama-2-7b-hf
  cache_implementation: static
  torch_compile: true
  torch_dtype: float16
  torch_compile_config:
    backend: inductor
    mode: reduce-overhead
    fullgraph: true

scenario:
  input_shapes:
    batch_size: 1
    sequence_length: 7
  generate_kwargs:
    max_new_tokens: 128
    min_new_tokens: 128
    do_sample: false
  memory: true
  latency: true
  iterations: 2
  duration: 0

# hydra/cli specific settings
hydra:
  run:
    # where to store run results
    dir: runs/${name}
  job:
    # change working directory to the run directory
    chdir: true
    env_set:
      # set environment variable OVERRIDE_BENCHMARKS to 1
      # to not skip benchmarks that have been run before
      OVERRIDE_BENCHMARKS: 1
      LOG_LEVEL: WARN
  sweep:
    dir: multirun
    subdir: ${hydra.job.override_dirname}
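This file is a Hydra config in the layout used by optimum-benchmark (a `defaults` list plus `launcher`/`backend`/`scenario` groups). Below is a hedged sketch of how such a config is typically launched; the entry point, flags, and override syntax are assumptions to verify against the benchmark README rather than a documented command.

```bash
# Launch the Hydra-based benchmark with this config (entry point and flags assumed — verify locally)
optimum-benchmark --config-dir benchmark/config --config-name generation
# Hydra-style override of a single config value, e.g. to benchmark a different checkpoint
optimum-benchmark --config-dir benchmark/config --config-name generation backend.model=meta-llama/Llama-2-7b-hf
```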
transformers/benchmark/config/generation.yaml/0
{ "file_path": "transformers/benchmark/config/generation.yaml", "repo_id": "transformers", "token_count": 451 }
FROM python:3.9-slim
ENV PYTHONDONTWRITEBYTECODE=1
ARG REF=main
USER root
RUN apt-get update && apt-get install -y libsndfile1-dev espeak-ng time git cmake g++
ENV UV_PYTHON=/usr/local/bin/python
RUN pip --no-cache-dir install uv && uv venv && uv pip install --no-cache-dir -U pip setuptools
RUN pip install --no-cache-dir "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[sklearn,tf-cpu,testing,sentencepiece,tf-speech,vision]"
RUN uv pip install --no-cache-dir "protobuf==3.20.3" tensorflow_probability
RUN apt-get clean && rm -rf /var/lib/apt/lists/*
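Because the image only parameterizes the `transformers` revision through the `REF` build arg, it can be built and smoke-tested directly from the repo root. The image tag and the verification command below are illustrative choices, not something defined by the repo:

```bash
# Build the TF pipeline test image from the repo root (tag name is arbitrary)
docker build -f docker/pipeline-tf.dockerfile --build-arg REF=main -t transformers-pipeline-tf .
# Verify the install by printing the transformers version inside the container
docker run --rm transformers-pipeline-tf python -c "import transformers; print(transformers.__version__)"
```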
transformers/docker/pipeline-tf.dockerfile/0
{ "file_path": "transformers/docker/pipeline-tf.dockerfile", "repo_id": "transformers", "token_count": 229 }
FROM google/cloud-sdk:slim

# Build args.
ARG GITHUB_REF=refs/heads/main

# TODO: This Dockerfile installs pytorch/xla 3.6 wheels. There are also 3.7
# wheels available; see below.
ENV PYTHON_VERSION=3.6

RUN apt-get update && apt-get install -y --no-install-recommends \
    build-essential \
    cmake \
    git \
    curl \
    ca-certificates

# Install conda and python.
# NOTE new Conda does not forward the exit status... https://github.com/conda/conda/issues/8385
RUN curl -o ~/miniconda.sh https://repo.anaconda.com/miniconda/Miniconda3-4.7.12-Linux-x86_64.sh && \
    chmod +x ~/miniconda.sh && \
    ~/miniconda.sh -b && \
    rm ~/miniconda.sh

ENV PATH=/root/miniconda3/bin:$PATH

RUN conda create -y --name container python=$PYTHON_VERSION

# Run the rest of commands within the new conda env.
# Use absolute path to appease Codefactor.
SHELL ["/root/miniconda3/bin/conda", "run", "-n", "container", "/bin/bash", "-c"]
RUN conda install -y python=$PYTHON_VERSION mkl

RUN pip uninstall -y torch && \
    # Python 3.7 wheels are available. Replace cp36-cp36m with cp37-cp37m
    gsutil cp 'gs://tpu-pytorch/wheels/torch-nightly-cp${PYTHON_VERSION/./}-cp${PYTHON_VERSION/./}m-linux_x86_64.whl' . && \
    gsutil cp 'gs://tpu-pytorch/wheels/torch_xla-nightly-cp${PYTHON_VERSION/./}-cp${PYTHON_VERSION/./}m-linux_x86_64.whl' . && \
    gsutil cp 'gs://tpu-pytorch/wheels/torchvision-nightly-cp${PYTHON_VERSION/./}-cp${PYTHON_VERSION/./}m-linux_x86_64.whl' . && \
    pip install 'torch-nightly-cp${PYTHON_VERSION/./}-cp${PYTHON_VERSION/./}m-linux_x86_64.whl' && \
    pip install 'torch_xla-nightly-cp${PYTHON_VERSION/./}-cp${PYTHON_VERSION/./}m-linux_x86_64.whl' && \
    pip install 'torchvision-nightly-cp${PYTHON_VERSION/./}-cp${PYTHON_VERSION/./}m-linux_x86_64.whl' && \
    rm 'torch-nightly-cp${PYTHON_VERSION/./}-cp${PYTHON_VERSION/./}m-linux_x86_64.whl' && \
    rm 'torch_xla-nightly-cp${PYTHON_VERSION/./}-cp${PYTHON_VERSION/./}m-linux_x86_64.whl' && \
    rm 'torchvision-nightly-cp${PYTHON_VERSION/./}-cp${PYTHON_VERSION/./}m-linux_x86_64.whl' && \
    apt-get install -y libomp5

ENV LD_LIBRARY_PATH=root/miniconda3/envs/container/lib

# Install huggingface/transformers at the current PR, plus dependencies.
RUN git clone https://github.com/huggingface/transformers.git && \
    cd transformers && \
    git fetch origin $GITHUB_REF:CI && \
    git checkout CI && \
    cd .. && \
    pip install ./transformers && \
    pip install -r ./transformers/examples/pytorch/_test_requirements.txt && \
    pip install pytest

RUN python -c "import torch_xla; print(torch_xla.__version__)"
RUN python -c "import transformers as trf; print(trf.__version__)"
RUN conda init bash
COPY docker-entrypoint.sh /usr/local/bin/
RUN chmod +x /usr/local/bin/docker-entrypoint.sh
ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"]
CMD ["bash"]
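The `GITHUB_REF` build arg is how CI points this image at a specific branch or PR ref. A hedged example of overriding it at build time — the tag name is arbitrary and the ref shown is simply the declared default:

```bash
# Build the TPU test image, pinning the transformers ref the image should check out
docker build -f docker/transformers-pytorch-tpu/Dockerfile --build-arg GITHUB_REF=refs/heads/main -t transformers-pytorch-tpu .
```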
transformers/docker/transformers-pytorch-tpu/Dockerfile/0
{ "file_path": "transformers/docker/transformers-pytorch-tpu/Dockerfile", "repo_id": "transformers", "token_count": 1235 }
# قوالب نماذج الدردشة ## مقدمة تعد **الدردشة** أحد استخدامات نماذج اللغات الكبيرة (LLMs) شائعة الاستخدام بشكل متزايد. ففي سياق الدردشة، وبدلاً من متابعة سلسلة نصية واحدة (كما هو الحال مع نماذج اللغات القياسية)، يواصل النموذج بدلاً من ذلك محادثة تتكون من رسالة واحدة أو أكثر، تتضمن كل منها دورًا، مثل "المستخدم" أو "المساعد"، بالإضافة إلى نص الرسالة. وكما هو الحال مع تقسيم النص إلى رموز (tokenization)، تتوقع النماذج المختلفة تنسيقات إدخال مختلفة تمامًا للمحادثة. لهذا السبب أضفنا **قوالب الدردشة** كميزة جديدة. تُعد قوالب المحادثة جزءًا من tokenizer. تحدد هذه القوالب كيفية تحويل المحادثات، والتي يتم تمثيلها كقوائم من الرسائل، إلى سلسلة نصية واحدة قابلة للتقسيم إلى رموز بالتنسيق الذي يتوقعه النموذج. دعونا نجعل هذا ملموسًا بمثال سريع باستخدام نموذج `BlenderBot`. لدى BlenderBot قالب افتراضي بسيط للغاية، والذي يضيف في الغالب مسافات بيضاء بين جولات الحوار: ```python >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill") >>> chat = [ ... {"role": "user", "content": "Hello, how are you?"}, ... {"role": "assistant", "content": "I'm doing great. How can I help you today?"}, ... {"role": "user", "content": "I'd like to show off how chat templating works!"}, ... ] >>> tokenizer.apply_chat_template(chat, tokenize=False) " Hello, how are you? I'm doing great. How can I help you today? I'd like to show off how chat templating works!</s>" ``` لاحظ كيف تم ضغط الدردشة بأكملها في سلسلة واحدة. إذا استخدمنا `tokenize=True`، وهو الإعداد الافتراضي، فسيتم أيضًا تحليل السلسلة نحويًا نيابة عنا. ولكن، لنشاهد قالبًا أكثر تعقيدًا في العمل، دعونا نستخدم نموذج `mistralai/Mistral-7B-Instruct-v0.1`. ```python >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.1") >>> chat = [ ... {"role": "user", "content": "Hello, how are you?"}, ... {"role": "assistant", "content": "I'm doing great. How can I help you today?"}, ... {"role": "user", "content": "I'd like to show off how chat templating works!"}, ... ] >>> tokenizer.apply_chat_template(chat, tokenize=False) "<s>[INST] Hello, how are you? [/INST]I'm doing great. How can I help you today?</s> [INST] I'd like to show off how chat templating works! [/INST]</s>" ``` لاحظ كيف أضاف المجزىء اللغوى tokenizer رموز التحكم `[INST]` و `[/INST]` للإشارة إلى بداية ونهاية رسائل المستخدم (ولكن ليس رسائل المساعد!) ، وتم تكثيف المحادثة بأكملها في سلسلة نصية واحدة. إذا استخدمنا `tokenize=True` ، وهو الإعداد الافتراضي ، فسيتم أيضًا تقسيم تلك السلسلة إلى رموز. حاول الآن استخدام نفس الشفرة، لكن مع استبدال النموذج بـ `HuggingFaceH4/zephyr-7b-beta` ، وستحصل على: ```text <|user|> Hello, how are you?</s> <|assistant|> I'm doing great. How can I help you today?</s> <|user|> I'd like to show off how chat templating works!</s> ``` تم ضبط كل من Zephyr و Mistral-Instruct من نفس النموذج الأصلي ، Mistral-7B-v0.1. ومع ذلك ، فقد تم تدريبهم بتنسيقات دردشة مختلفة تمامًا. بدون قوالب المحادثة، ستضطر إلى كتابة شفرة تنسيق يدويًا لكل نموذج ، ومن السهل جدًا ارتكاب أخطاء بسيطة تؤثر على الأداء! تُدير قوالب المحادثة تفاصيل التنسيق نيابةً عنك ، مما يُتيح لك كتابة شفرة عامة تعمل مع أي نموذج. ## كيف أستخدم قوالب الدردشة؟ كما رأيت في المثال السابق، من السهل استخدام قوالب الدردشة. قم ببساطة بإنشاء قائمة من الرسائل، مع مفتاحي `role` و`content`، ثم قم بتمريرها إلى [`~PreTrainedTokenizer.apply_chat_template`] . بمجرد قيامك بذلك، ستحصل على مخرجات جاهزة للاستخدام! 
عند استخدام قوالب الدردشة كإدخال لتوليد نصوص بواسطة النموذج، فمن الجيد أيضًا استخدام `add_generation_prompt=True` لإضافة [مطالبات توليد النصوص](#what-are-generation-prompts). فيما يلي مثال على إعداد الإدخال لـ `model.generate()`، باستخدام Zephyr مرة أخرى: ```python from transformers import AutoModelForCausalLM, AutoTokenizer checkpoint = "HuggingFaceH4/zephyr-7b-beta" tokenizer = AutoTokenizer.from_pretrained(checkpoint) model = AutoModelForCausalLM.from_pretrained(checkpoint) # قد ترغب في استخدام bfloat16 و/أو الانتقال إلى GPU هنا messages = [ { "role": "system", "content": "You are a friendly chatbot who always responds in the style of a pirate", }, {"role": "user", "content": "How many helicopters can a human eat in one sitting?"}, ] tokenized_chat = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt") print(tokenizer.decode(tokenized_chat[0])) ``` سيؤدي هذا إلى إنتاج سلسلة نصية بتنسيق الإدخال الذي يتوقعه Zephyr. ```text <|system|> You are a friendly chatbot who always responds in the style of a pirate</s> <|user|> How many helicopters can a human eat in one sitting?</s> <|assistant|> ``` الآن بعد أن تم تنسيق الإدخال بشكل صحيح لـ Zephyr، يمكننا استخدام النموذج لإنشاء رد على سؤال المستخدم: ```python outputs = model.generate(tokenized_chat, max_new_tokens=128) print(tokenizer.decode(outputs[0])) ``` سيؤدي هذا إلى ما يلي: ```text <|system|> You are a friendly chatbot who always responds in the style of a pirate</s> <|user|> How many helicopters can a human eat in one sitting?</s> <|assistant|> Matey, I'm afraid I must inform ye that humans cannot eat helicopters. Helicopters are not food, they are flying machines. Food is meant to be eaten, like a hearty plate o' grog, a savory bowl o' stew, or a delicious loaf o' bread. But helicopters, they be for transportin' and movin' around, not for eatin'. So, I'd say none, me hearties. None at all. ``` كان ذلك سهلاً بعد كل شيء ! ## هل هناك قنوات معالجة أوتوماتيكية للدردشة؟ نعم يوجد ! تدعم قنوات المعالجة توليد النصوص مدخلات الدردشة ، مما يُسهّل استخدام نماذج الدردشة . في الماضي ، كنا نستخدم فئة "ConversationalPipeline" المُخصّصة ، ولكن تم الآن إيقافها وتم دمج وظائفها في [`TextGenerationPipeline`]. دعونا نجرّب مثال Zephyr مرة أخرى ، ولكن هذه المرة باستخدام قناة معالجة: ```python from transformers import pipeline pipe = pipeline("text-generation", "HuggingFaceH4/zephyr-7b-beta") messages = [ { "role": "system", "content": "You are a friendly chatbot who always responds in the style of a pirate", }, {"role": "user", "content": "How many helicopters can a human eat in one sitting?"}, ] print(pipe(messages, max_new_tokens=128)[0]['generated_text'][-1]) # طباعة استجابة المساعد ``` ```النص {'role': 'assistant', 'content': "Matey, I'm afraid I must inform ye that humans cannot eat helicopters. Helicopters are not food, they are flying machines. Food is meant to be eaten, like a hearty plate o' grog, a savory bowl o' stew, or a delicious loaf o' bread. But helicopters, they be for transportin' and movin' around, not for eatin'. So, I'd say none, me hearties. None at all."} ``` سيُراعي قناة المعالجة جميع تفاصيل تقسيم النص إلى رموز واستدعاء apply_chat_template نيابةً عنك - بمجرد أن يصبح لِدى النموذج قالب دردشة ، فكل ما تحتاج إلى القيام به هو تهيئة قناة معالجة وتمرير قائمة الرسائل إليها! ## ما هي "مطالبات التوليد"؟ قد تلاحظ أن طريقة `apply_chat_template` لها معامل `add_generation_prompt`. تخبر هذه المعامل القالب بإضافة رموز تشير إلى بداية رد البوت. 
على سبيل المثال، ضع في اعتبارك الدردشة التالية: ```python messages = [ {"role": "user", "content": "Hi there!"}, {"role": "assistant", "content": "Nice to meet you!"}, {"role": "user", "content": "Can I ask a question?"} ] ``` إليك كيف سيبدو ذلك بدون موجه توليد نصوص ، بالنسبة لنموذج يستخدم تنسيق "ChatML" القياسي : ```python tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=False) """<|im_start|>user Hi there!<|im_end|> <|im_start|>assistant Nice to meet you!<|im_end|> <|im_start|>user Can I ask a question?<|im_end|> """ ``` وهكذا يبدو الأمر **مع** مطالبة التوليد: ```python tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) """<|im_start|>user Hi there!<|im_end|> <|im_start|>assistant Nice to meet you!<|im_end|> <|im_start|>user Can I ask a question?<|im_end|> <|im_start|>assistant """ ``` لاحظ أننا أضفنا هذه المرة الرموز التي تشير إلى بداية رد البوت. يضمن هذا أنه عندما يُولّد النموذج نصًا فسيكتب رد البوت بدلاً من القيام بشيء غير متوقع، مثل الاستمرار في رسالة المستخدم. تذكر، أن نماذج الدردشة لا تزال مجرد نماذج للغة - فهي مدربة على متابعة النصوص، والدردشة هي مجرد نوع خاص من النصوص بالنسبة لها! يجب توجيهها برموز تحكم مناسبة، حتى تعرف ما الذي يجب عليها فعله. لا تتطلب جميع النماذج الرموز التحكمية لتوليد نصوص . بعض النماذج ، مثل LLaMA ، ليس لديها أي رموز خاصة قبل ردود البوت . في هذه الحالات ، لن يكون لمعامل `add_generation_prompt` أي تأثير. يعتمد التأثير الدقيق الذي تُحدثه `add_generation_prompt` على القالب المستخدم . ## ما وظيفة "continue_final_message"؟ عند تمرير قائمة من الرسائل إلى `apply_chat_template` أو `TextGenerationPipeline` ، يمكنك اختيار تنسيق المحادثة بحيث يواصل النموذج الرسالة الأخيرة في المحادثة بدلاً من بدء رسالة جديدة. يتم ذلك عن طريق إزالة أي رموز نهاية التسلسل التي تشير إلى نهاية الرسالة الأخيرة ، بحيث يقوم النموذج ببساطة بتمديد الرسالة الأخيرة عندما يبدأ في توليد النص . يُعد هذا أمرًا مفيدًا "لِمَلء بداية" رد النموذج مُسبقًا. وهنا مثال: ```python chat = [ {"role": "user", "content": "Can you format the answer in JSON?"}, {"role": "assistant", "content": '{"name": "'}, ] formatted_chat = tokenizer.apply_chat_template(chat, tokenize=True, return_dict=True, continue_final_message=True) model.generate(**formatted_chat) ``` سيقوم النموذج بتوليد نص يكمل سلسلة JSON ، بدلاً من بدء رسالة جديدة . يمكن أن يكون هذا النهج مفيدًا جدًا لتحسين دقة اتباع النموذج للإرشادات عندما تعرف كيف تريد أن يبدأ ردوده . . نظرًا لأن `add_generation_prompt` تضيف الرموز التي تبدأ رسالة جديدة ، و `continue_final_message` تزيل أي رموز نهاية الرسالة من الرسالة الأخيرة ، فليس من المنطقي استخدامهما معًا . ونتيجة لذلك ، ستتلقّى خطأً إذا حاولت ذلك ! السلوك الافتراضي لِـ `TextGenerationPipeline` هو تعيين `add_generation_prompt=True` بحيث تبدأ رسالة جديدة . ومع ذلك ، إذا كانت الرسالة الأخيرة في المحادثة التي تم إدخالها لديها دور "assistant" ، فسوف تفترض أن هذه الرسالة هي "مَلء بداية" وتتحوّل إلى `continue_final_message=True` بدلاً من ذلك ، لأن مُعظم النماذج لا تدعم عدة رسائل متتالية للمساعد . يمكنك تجاوز هذا السلوك عن طريق تمرير معامل `continue_final_message` بشكل صريح عند استدعاء قناة المعالجة . ## هل يمكنني استخدام قوالب الدردشة في التدريب؟ نعم ! تُعد هذه طريقة جيدة للتأكد من أن قالب الدردشة يتطابق مع الرموز التي يراها النموذج أثناء التدريب . نوصي بتطبيق قالب الدردشة كخطوة معالجة أولية لمجموعة بياناتك . بعد ذلك ، يمكنك ببساطة متابعة عملية التدريب كما هو الحال مع أي مهمة تدريب نماذج لغات أخرى . عند التدريب ، يجب أن تُعيّن عادةً `add_generation_prompt=False` ، لأنه لن تكون الرموز المُضافة لتحفيز رد المساعد مفيدة أثناء التدريب . 
دعونا نرى مثالاً : ```python from transformers import AutoTokenizer from datasets import Dataset tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta") chat1 = [ {"role": "user", "content": "Which is bigger, the moon or the sun?"}, {"role": "assistant", "content": "The sun."} ] chat2 = [ {"role": "user", "content": "Which is bigger, a virus or a bacterium?"}, {"role": "assistant", "content": "A bacterium."} ] dataset = Dataset.from_dict({"chat": [chat1, chat2]}) dataset = dataset.map(lambda x: {"formatted_chat": tokenizer.apply_chat_template(x["chat"], tokenize=False, add_generation_prompt=False)}) print(dataset['formatted_chat'][0]) ``` ونحصل على: ```text <|user|> Which is bigger, the moon or the sun?</s> <|assistant|> The sun.</s> ``` من هنا، استمر في التدريب كما تفعل مع مهمة نمذجة اللغة القياسية، باستخدام عمود `formatted_chat`. <Tip> بشكل افتراضي ، تضيف بعض *tokenizers* رموزًا خاصة مثل `<bos>` و `<eos>` إلى النص الذي تقوم بتقسيمه إلى رموز. يجب أن تتضمن قوالب المحادثة بالفعل جميع الرموز الخاصة التي تحتاجها ، وبالتالي فإن الرموز الخاصة الإضافية ستكون غالبًا غير صحيحة أو مُكررة ، مما سيؤثر سلبًا على أداء النموذج . لذلك ، إذا قمت بتنسيق النص باستخدام `apply_chat_template(tokenize=False)` ، فيجب تعيين المعامل `add_special_tokens=False` عندما تقوم بتقسيم ذلك النص إلى رموز لاحقًا . إذا كنت تستخدم `apply_chat_template(tokenize=True)` ، فلن تحتاج إلى القلق بشأن ذلك ! </Tip> ## متقدّم: مدخلات إضافية لِقوالب الدردشة المعامل الوحيدة التي تتطلبها طريقة `apply_chat_template` هي `messages`. ومع ذلك، يمكنك تمرير أي معامل ككلمة مفتاحية إلى `apply_chat_template` وستكون متاحة داخل القالب. يمنحك هذا الكثير من المرونة لاستخدام قوالب الدردشة للعديد من الأشياء. لا توجد قيود على أسماء هذه المعامﻻت أو تنسيقاتها - يمكنك تمرير سلاسل نصية أو قوائم أو قواميس أو أي شيء آخر تريده. ومع ذلك، هناك بعض الحالات الشائعة لاستخدام هذه المعامﻻت الإضافية، مثل تمرير أدوات لاستدعاء الوظائف، أو المستندات لإنشاء النصوص المُعزّزة بالاسترجاع. في هذه الحالات الشائعة، لدينا بعض التوصيات المُحدّدة حول أسماء هذه المعامﻻت وتنسيقاتها، والتي يتم وصفها في الأقسام التالية. نشجع مطوّري النماذج على جعل قوالب الدردشة الخاصة بهم متوافقة مع هذا التنسيق، لتسهيل نقل التعليمات البرمجية لاستدعاء الأدوات بين النماذج. ## متقدم: استخدام الأداة / استدعاء الدالة يمكن لنماذج "استخدام الأداة" اختيار استدعاء الدوال كأدوات خارجية قبل توليد الإجابة. عند تمرير الأدوات إلى نموذج استخدام الأدوات، يمكنك ببساطة تمرير قائمة من الوظائف إلى معامل `tools`: ```python import datetime def current_time(): """Get the current local time as a string.""" return str(datetime.now()) def multiply(a: float, b: float): """ A function that multiplies two numbers Args: a: The first number to multiply b: The second number to multiply """ return a * b tools = [current_time, multiply] model_input = tokenizer.apply_chat_template( messages, tools=tools ) ``` لكي يعمل هذا بشكل صحيح، يجب عليك كتابة وظائفك بالتنسيق السابق، حتى يمكن تحليلها بشكل صحيح كأدوات. على وجه التحديد، يجب عليك اتباع هذه القواعد: - يجب أن يكون للدالة اسم وصفي. - يجب أن يكون لكل معامل نوع للتلميح. - يجب أن تحتوي الدالة على سلسلة مستندية بتنسيق Google القياسي (بمعنى وصف الدالة الأولي متبوعًا بكتلة `Args:` التي تصف المعاﻻت، ما لم تكن الدالة لا تحتوي على أي معامﻻت. - لا تقم بتضمين الأنواع في كتلة `Args:` . بعبارة أخرى، اكتب `a: The first number to multiply`، وليس `a (int): The first number to multiply`. يجب أن تذهب تلميحات الأنواع في رأس الدالة بدلاً من ذلك. - يمكن أن يكون للدالة نوع للإرجاع ومربع `Returns:` في السلسلة. ومع ذلك، فهذه اختيارية لأن معظم نماذج استخدام الأدوات تتجاهلها. 
### تمرير نتائج الأداة إلى النموذج يكفي الكود السابقة لسرد الأدوات المتاحة لنموذجك، ولكن ماذا يحدث إذا أراد النموذج استخدام واحدة منها؟ إذا حدث ذلك، فيجب عليك: 1. تحليل مخرجات النموذج للحصول على اسم (أسماء) الأدوات ومعامﻻتها. 2. أضف استدعاء (استدعاءات) النموذج لِلأدوات إلى المحادثة. 3. استدعاء الدالة (الدالات) المقابلة بتلك المعامﻻت. 4. أضف النتيجة (النتائج) إلى المحادثة ### مثال كامل على استخدام الأداة سنستعرض مثالاً على استخدام الأدوات خطوة بخطوة . في هذا المثال ، سنستخدم نموذج `Hermes-2-Pro` بحجم 8 مليارات معامل ، نظرًا لأنه أحد أعلى نماذج استخدام الأدوات أداءً في فئة حجمه وقت كتابة هذا النص . إذا كان لديك الذاكرة الكافية ، فيمكنك النظر في استخدام نموذج أكبر بدلاً من ذلك مثل `Command-R` أو `Mixtral-8x22B` ، وكلاهما يدعم استخدام الأدوات ويوفر أداءً أقوى . أولاً ، لنقم بتحميل نموذجنا و tokenizer الخاص بنا: ```python import torch from transformers import AutoModelForCausalLM, AutoTokenizer checkpoint = "NousResearch/Hermes-2-Pro-Llama-3-8B" tokenizer = AutoTokenizer.from_pretrained(checkpoint) model = AutoModelForCausalLM.from_pretrained(checkpoint, torch_dtype=torch.bfloat16, device_map="auto") ```python messages = [ {"role": "system", "content": "You are a bot that responds to weather queries. You should reply with the unit used in the queried location."}, {"role": "user", "content": "Hey, what's the temperature in Paris right now?"} ] ``` الآن، لنقم نطبق قالب الدردشة ونولد رد: ```python inputs = tokenizer.apply_chat_template(messages, chat_template="tool_use", tools=tools, add_generation_prompt=True, return_dict=True, return_tensors="pt") inputs = {k: v.to(model.device) for k, v in inputs.items()} out = model.generate(**inputs, max_new_tokens=128) print(tokenizer.decode(out[0][len(inputs["input_ids"][0]):])) ``` ونحصل على: ```text <tool_call> {"arguments": {"location": "Paris, France", "unit": "celsius"}, "name": "get_current_temperature"} </tool_call><|im_end|> ``` لقد قام النموذج باستدعاء الدالة مع معامﻻت صحيحة، بالصيغة التي طلبتها توثيق الدالة. لقد استنتج أننا نشير على الأرجح إلى باريس في فرنسا، وتذكر أنه بكونها موطن وحدات القياس الدولية، يجب عرض درجة الحرارة في فرنسا بالدرجة المئوية. دعنا نضيف استدعاء الأداة الخاص بالنموذج إلى المحادثة. لاحظ أننا نولد معرف استدعاء أداة عشوائيًا هنا. لا تستخدم جميع النماذج هذه المعرفات، ولكنها تسمح للنماذج بإصدار عدة استدعاءات للأدوات في نفس الوقت وتتبع الاستجابة المقابلة لكل استدعاء. يمكنك توليد هذه المعرفات بأي طريقة تريدها، ولكن يجب أن تكون فريدة داخل كل محادثة. ```python tool_call_id = "vAHdf3" # Random ID, should be unique for each tool call tool_call = {"name": "get_current_temperature", "arguments": {"location": "Paris, France", "unit": "celsius"}} messages.append({"role": "assistant", "tool_calls": [{"id": tool_call_id, "type": "function", "function": tool_call}]}) ``` الآن بعد أن أضفنا استدعاء الأداة إلى المحادثة، يمكننا استدعاء الدالة وإضافة النتيجة إلى المحادثة. نظرًا لأننا نستخدم دالة وهمية لهذا المثال والتي تعيد دائمًا 22.0، فيمكننا ببساطة إضافة تلك النتيجة مباشرةً. لاحظ معرف استدعاء الأداة - يجب أن يتطابق مع المعرف المستخدم في استدعاء الأداة أعلاه. 
```python messages.append({"role": "tool", "tool_call_id": tool_call_id, "name": "get_current_temperature", "content": "22.0"}) ``` أخيرًا، دعنا نجعل المساعد يقرأ مخرجات الدالة ويكمل الدردشة مع المستخدم: ```python inputs = tokenizer.apply_chat_template(messages, chat_template="tool_use", tools=tools, add_generation_prompt=True, return_dict=True, return_tensors="pt") inputs = {k: v.to(model.device) for k, v in inputs.items()} out = model.generate(**inputs, max_new_tokens=128) print(tokenizer.decode(out[0][len(inputs["input_ids"][0]):])) ``` ونحصل على: ```text The current temperature in Paris, France is 22.0 ° Celsius.<|im_end|> ``` <Tip> لا تستخدم جميع نماذج استخدام الأدوات جميع ميزات استدعاء الأدوات الموضحة أعلاه. يستخدم البعض معرفات استدعاء الأدوات، بينما يستخدم البعض الآخر ببساطة اسم الدالة ويقارن استدعاءات الأدوات بالنتائج باستخدام الترتيب، وهناك عدة نماذج لا تستخدم أيًا منهما ولا تصدر سوى استدعاء أداة واحد في كل مرة لتجنب الارتباك. إذا كنت تريد أن يكون رمزك متوافقًا مع أكبر عدد ممكن من النماذج، فإننا نوصي بهيكلة استدعاءات الأدوات الخاصة بك كما هو موضح هنا، وإعادة نتائج الأدوات بالترتيب الذي أصدرها النموذج. يجب أن تتعامل قوالب الدردشة على كل نموذج مع الباقي. </Tip> ### فهم مخططات الأدوات يتم تحويل كل دالة تقوم بتمريرها إلى معامل `tools` في دالة `apply_chat_template` إلى [مخطط JSON](https://json-schema.org/learn/getting-started-step-by-step). يتم بعد ذلك تمرير هذه المخططات إلى قالب الدردشة النموذج. وبعبارة أخرى، فإن نماذج استخدام الأدوات لا ترى دوالك مباشرة، ولا ترى مطلقًا الكود الموجود بداخلها. ما يهمها هو**تعريفات** الدوال و**المعامﻻت** التي تحتاج إلى تمريرها إليها - فهي تهتم بما تفعله الأدوات وكيفية استخدامها، وليس بكيفية عملها! يقع على عاتقك قراءة مخرجاتها، والكشف عما إذا كانت قد طلبت استخدام أداة، وتمرير المعامﻻت إلى دالة الأداة، وإرجاع الرد في الدردشة. يجب أن يكون إنشاء مخططات JSON لتمريرها إلى القالب تلقائيًا وغير مرئي طالما أن دوالك تتبع المواصفات الموضحة أعلاه، ولكن إذا واجهت مشكلات، أو إذا كنت تريد ببساطة مزيدًا من التحكم في التحويل، فيمكنك التعامل مع التحويل يدويًا. فيما يلي مثال على تحويل مخطط يدوي: ```python from transformers.utils import get_json_schema def multiply(a: float, b: float): """ A function that multiplies two numbers Args: a: The first number to multiply b: The second number to multiply """ return a * b schema = get_json_schema(multiply) print(schema) ``` سيؤدي هذا إلى ما يلي: ```json { "type": "function", "function": { "name": "multiply", "description": "A function that multiplies two numbers", "parameters": { "type": "object", "properties": { "a": { "type": "number", "description": "The first number to multiply" }, "b": { "type": "number", "description": "The second number to multiply" } }, "required": ["a", "b"] } } } ``` إذا كنت ترغب في ذلك، يمكنك تحرير هذه المخططات، أو حتى كتابتها من البداية بنفسك دون استخدام `get_json_schema` على الإطلاق. يمكن تمرير مخططات JSON مباشرةً إلى معامل `tools` في `apply_chat_template` - يمنحك هذا الكثير من القوة لتعريف مخططات دقيقة لوظائف أكثر تعقيدًا. ولكن كن حذرًا - كلما زاد تعقيد مخططاتك، زاد احتمال ارتباك النموذج عند التعامل معها! نوصي بتوقيعات دوال بسيطة حيثما أمكن، مع تقليل المعامﻻت (وخاصة المعامﻻت المعقدة والمتداخلة) إلى الحد الأدنى. 
فيما يلي مثال على تعريف المخططات يدويًا، وتمريرها مباشرةً إلى `apply_chat_template`: ```python # A simple function that takes no arguments current_time = { "type": "function", "function": { "name": "current_time", "description": "Get the current local time as a string.", "parameters": { 'type': 'object', 'properties': {} } } } # A more complete function that takes two numerical arguments multiply = { 'type': 'function', 'function': { 'name': 'multiply', 'description': 'A function that multiplies two numbers', 'parameters': { 'type': 'object', 'properties': { 'a': { 'type': 'number', 'description': 'The first number to multiply' }, 'b': { 'type': 'number', 'description': 'The second number to multiply' } }, 'required': ['a', 'b'] } } } model_input = tokenizer.apply_chat_template( messages, tools = [current_time, multiply] ) ``` ## متقدم: توليد قائم على الاسترجاع يمكن لنماذج اللغة الكبيرة من نوع "توليد قائم على الاسترجاع" أو "RAG" البحث في مجموعة نصوص عن معلومات قبل الرد على الاستعلام. يسمح هذا للنماذج بتوسيع قاعدة معارفها بشكل كبير إلى ما هو أبعد من حجم سياقها المحدود. توصيتنا لنماذج RAG هي أن يقبل قالبها وسيطة `documents`. يجب أن تكون هذه قائمة من المستندات، حيث يكون كل "مستند" عبارة عن قاموس واحد بمفاتيح `title` و `contents`، وكلاهما سلاسل نصية. نظرًا لأن هذا التنسيق أبسط بكثير من مخططات JSON المستخدمة للأدوات، فلا توجد حاجة إلى دوال مساعدة. فيما يلي مثال على قالب RAG بالفعل: ```python from transformers import AutoTokenizer, AutoModelForCausalLM # تحميل النموذج والمجزىء اللغوي model_id = "CohereForAI/c4ai-command-r-v01-4bit" tokenizer = AutoTokenizer.from_pretrained(model_id) model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto") device = model.device # الحصول على الجهاز الذي تم تحميل النموذج عليه # تعريف مُدخلات المحادثة conversation = [ {"role": "user", "content": "What has Man always dreamed of?"} ] # تعريف المستندات لتوليد قائم على الاسترجاع documents = [ { "title": "The Moon: Our Age-Old Foe", "text": "Man has always dreamed of destroying the moon. In this essay, I shall..." }, { "title": "The Sun: Our Age-Old Friend", "text": "Although often underappreciated, the sun provides several notable benefits..." } ] # معالجة المحادثة والمستندات باستخدام قالب RAG، وإرجاع موترات PyTorch. input_ids = tokenizer.apply_chat_template( conversation=conversation, documents=documents, chat_template="rag", tokenize=True, add_generation_prompt=True, return_tensors="pt").to(device) # توليد الرد gen_tokens = model.generate( input_ids, max_new_tokens=100, do_sample=True, temperature=0.3, ) # فك تشفير النص المُوَلّد وطباعته gen_text = tokenizer.decode(gen_tokens[0]) print(gen_text) ``` إن مُدخل documents للتوليد القائم على الاسترجاع غير مدعوم على نطاق واسع، والعديد من النماذج لديها قوالب دردشة تتجاهل هذا المُدخل ببساطة. للتحقق مما إذا كان النموذج يدعم مُدخل `documents`، يمكنك قراءة بطاقة النموذج الخاصة به، أو `print(tokenizer.chat_template)` لمعرفة ما إذا كان مفتاح `documents` مستخدمًا في أي مكان. <Tip> ومع ذلك، فإن أحد فئات النماذج التي تدعمه هي [Command-R](https://huggingface.co/CohereForAI/c4ai-command-r-08-2024) و [Command-R+](https://huggingface.co/CohereForAI/c4ai-command-r-pluse-08-2024) من Cohere، من خلال قالب الدردشة rag الخاص بهم. يمكنك رؤية أمثلة إضافية على التوليد باستخدام هذه الميزة في بطاقات النموذج الخاصة بهم. </Tip> ## متقدم: كيف تعمل قوالب الدردشة؟ يتم تخزين قالب الدردشة للنموذج في الخاصية `tokenizer.chat_template`. إذا لم يتم تعيين قالب دردشة، فسيتم استخدام القالب الافتراضي لفئة النموذج هذه بدلاً من ذلك. 
دعونا نلقي نظرة على قالب دردشة `Zephyr`، ولكن لاحظ أن هذا القالب مُبسّط قليلاً عن القالب الفعلي! ``` {%- for message in messages %} {{- '<|' + message['role'] + |>\n' }} {{- message['content'] + eos_token }} {%- endfor %} {%- if add_generation_prompt %} {{- '<|assistant|>\n' }} {%- endif %} ``` إذا لم تكن قد رأيت أحد هذه القوالب من قبل، فهذا [قالب Jinja](https://jinja.palletsprojects.com/en/3.1.x/templates/) .Jinja هي لغة قوالب تسمح لك بكتابة تعليمات برمجية بسيطة تُوَلّد نصًا. من نواحٍ عديدة، يُشبه الرمز والتركيب للغة Python. أما في لغة Python، سيبدو هذا القالب كما يلي: ```python for message in messages: print(f'<|{message["role"]}|>') print(message['content'] + eos_token) if add_generation_prompt: print('<|assistant|>') ``` يقوم القالب بثلاثة أشياء بشكل فعال: - لكل رسالة، بطبع الدور مُحاطًا بـ `<|` و `|>`، مثل `<|user|>` أو `<|assistant|>`. - بعد ذلك، يطبع محتوى الرسالة، متبوعًا برمز نهاية التسلسل `eos_token` . - أخيرًا، إذا تم تعيين `add_generation_prompt` ، يطبع الرمز المساعد، حتى يعرف النموذج أنه يجب أن يبدأ في توليد استجابة المساعد. هذا قالب بسيط جدًا، لكن Jinja تمنحك الكثير من المرونة للقيام بأشياء أكثر تعقيدًا! دعونا نرى قالب Jinja يُمكنه تنسيق المُدخلات بطريقة تُشبه الطريقة التي تُنسّق بها LLaMA مُدخلاتها (لاحظ أن قالب LLaMA الحقيقي يتضمن معالجة لرسائل النظام الافتراضية ومعالجة رسائل النظام بشكل مختلف قليلاً بشكل عام - لا تستخدم هذا القالب في التعليمات البرمجية الفعلية الخاصة بك!) ``` {%- for message in messages %} {%- if message['role'] == 'user' %} {{- bos_token + '[INST] ' + message['content'] + ' [/INST]' }} {%- elif message['role'] == 'system' %} {{- '<<SYS>>\\n' + message['content'] + '\\n<</SYS>>\\n\\n' }} {%- elif message['role'] == 'assistant' %} {{- ' ' + message['content'] + ' ' + eos_token }} {%- endif %} {%- endfor %} ``` نأمل أنه إذا حدقت في هذا لفترة قصيرة، يمكنك أن ترى ما يفعله هذا القالب - فهو يُضيف رموزًا مُحددة مثل `[INST]` و `[/INST]` بناءً على دور كل رسالة. يمكن تمييز رسائل المستخدم والمساعد والنظام بوضوح للنموذج بسبب الرموز التي تُحيط بها. ## متقدم: إضافة وتعديل قوالب الدردشة ### كيف أنشئ قالب دردشة؟ ببساطة، اكتب قالب Jinja واضبط `tokenizer.chat_template`. قد تجد أنه من الأسهل البدء بقالب موجود من نموذج آخر وتحريره ببساطة ليناسب احتياجاتك! على سبيل المثال، يمكننا أن نأخذ قالب LLaMA أعلاه ونضيف `[ASST]` و `[/ASST]` إلى رسائل المساعد: ``` {%- for message in messages %} {%- if message['role'] == 'user' %} {{- bos_token + '[INST] ' + message['content'].strip() + ' [/INST]' }} {%- elif message['role'] == 'system' %} {{- '<<SYS>>\\n' + message['content'].strip() + '\\n<</SYS>>\\n\\n' }} {%- elif message['role'] == 'assistant' %} {{- '[ASST] ' + message['content'] + ' [/ASST]' + eos_token }} {%- endif %} {%- endfor %} ``` الآن، اضبط ببساطة الخاصية `tokenizer.chat_template`. في المرة القادمة التي تستخدم فيها [`~PreTrainedTokenizer.apply_chat_template`] ، سيستخدم القالب الجديد الخاص بك! سيتم حفظ هذه الخاصية في ملف `tokenizer_config.json`، حتى تتمكن من استخدام [`~utils.PushToHubMixin.push_to_hub`] لتحميل قالبك الجديد إلى Hub والتأكد من أن الجميع يستخدم القالب الصحيح لنموذجك! ```python template = tokenizer.chat_template template = template.replace("SYS", "SYSTEM") # تغيير رمز النظام tokenizer.chat_template = template # تعيين القالب الجديد tokenizer.push_to_hub("model_name") # تحميل القالب الجديد إلى Hub! ``` يتم استدعاء الدالة [`~PreTrainedTokenizer.apply_chat_template`] الذي نستخدم قالب الدردشة الخاص بك بواسطة فئة [`TextGenerationPipeline`] لذلك بمجرد تعيين قالب الدردشة الصحيح، سيصبح نموذجك متوافقًا تلقائيًا مع [`TextGenerationPipeline`]. 
<Tip> إذا كنت تُجري ضبطًا دقيقًا لنموذج للدردشة، بالإضافة إلى تعيين قالب دردشة، فربما يجب عليك إضافة أي رموز تحكم دردشة جديدة كرموز خاصة في المجزىء اللغوي. لا يتم تقسيم الرموز الخاصة أبدًا، مما يضمن معالجة رموز التحكم الخاصة بك دائمًا كرموز فردية بدلاً من تجزئتها إلى أجزاء. يجب عليك أيضًا تعيين خاصية `eos_token` للمجزىء اللغوي إلى الرمز الذي يُشير إلى نهاية توليدات المساعد في قالبك. سيضمن هذا أن أدوات توليد النصوص يمكنها تحديد وقت إيقاف توليد النص بشكل صحيح. </Tip> ### لماذا تحتوي بعض النماذج على قوالب متعددة؟ تستخدم بعض النماذج قوالب مختلفة لحالات استخدام مختلفة. على سبيل المثال، قد تستخدم قالبًا واحدًا للدردشة العادية وآخر لاستخدام الأدوات، أو التوليد القائم على الاسترجاع. في هذه الحالات، تكون `tokenizer.chat_template` قاموسًا. يمكن أن يتسبب هذا في بعض الارتباك، وحيثما أمكن، نوصي باستخدام قالب واحد لجميع حالات الاستخدام. يمكنك استخدام عبارات Jinja مثل `if tools is defined` وتعريفات `{% macro %}` لتضمين مسارات تعليمات برمجية متعددة بسهولة في قالب واحد. عندما يحتوي المعالج اللغوي على قوالب متعددة، ستكون `tokenizer.chat_template dict`، حيث يكون كل مفتاح هو اسم قالب. يحتوي أسلوب `apply_chat_template` على معالجة خاصة لأسماء قوالب مُعينة: على وجه التحديد، سيبحث عن قالب باسم `default` في معظم الحالات، وسيُثير خطأً إذا لم يتمكن من العثور على واحد. ومع ذلك، إذا كان هناك قالب باسم `tool_use` عندما قام المستخدم بتمرير وسيطة `tools`، فسيستخدم هذا القالب بدلاً من ذلك. للوصول إلى قوالب بأسماء أخرى، مرر اسم القالب الذي تُريده إلى وسيطة `chat_template` لـ `apply_chat_template()`. نجد أن هذا قد يكون مُربكًا بعض الشيء للمستخدمين - لذلك إذا كنت تكتب قالبًا بنفسك، فننصحك بمحاولة وضعه كله في قالب واحد حيثما أمكن! ## ما القالب الذي يجب أن أستخدمه؟ عند تعيين قالب لنموذج تم تدريبه بالفعل على الدردشة، يجب التأكد من أن القالب يتطابق تمامًا مع تنسيق الرسالة الذي شاهده النموذج أثناء التدريب، وإلا فمن المحتمل أن تواجه تدهورًا في الأداء. هذا صحيح حتى إذا كنت تدرب النموذج بشكل إضافي - فمن المحتمل أن تحصل على أفضل أداء إذا قمت بإبقاء رموز الدردشة ثابتة. يُشبه هذا إلى حد كبير عملية التجزئة - فأنت تحصل بشكل عام على أفضل أداء للاستدلال أو الضبط الدقيق عندما تتطابق بدقة مع التجزئة المستخدمة أثناء التدريب. من ناحية أخرى، إذا كنت تُدرّب نموذجًا من البداية، أو تقوم بضبط دقيق لنموذج لغة أساسي للدردشة، لديك حرية اختيار قالب مناسب! تتمتع LLMs بالذكاء الكافي للتعامل مع العديد من تنسيقات الإدخال المختلفة. أحد الخيارات الشائعة هو تنسيق "ChatML"، وهو خيار جيد ومرن للعديد من حالات الاستخدام. يبدو كالتالي: ``` {%- for message in messages %} {{- '<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n' }} {%- endfor %} ``` إذا أعجبك هذا، فإليك نسخة جاهزة لوضعها في كودك. يتضمن الخط المفرد أيضًا دعمًا مفيدًا [لإرشادات التوليد](#what-are-generation-prompts)، ولكن لاحظ أنه لا يضيف رموز BOS أو EOS! إذا كان نموذجك يتوقع هذه الرموز، فلن يتم إضافتها تلقائيًا بواسطة "apply_chat_template" - بمعنى آخر، سيتم تجزئة النص باستخدام "add_special_tokens=False". هذا لتجنب التعارضات المحتملة بين القالب ومنطق "add_special_tokens". إذا كان نموذجك يتوقع رموزًا خاصة، فتأكد من إضافتها إلى القالب! ```python tokenizer.chat_template = "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}" ``` يُحيط هذا القالب كل رسالة بين الرمزين "<|im_start|>" و "<|im_end|>"، ويكتب ببساطة الدور كسلسلة نصية، مما يسمح بالمرونة في الأدوار التي تتدرب عليها. 
يبدو الناتج كما يلي: ```text <|im_start|>system You are a helpful chatbot that will do its best not to say anything so stupid that people tweet about it.<|im_end|> <|im_start|>user How are you?<|im_end|> <|im_start|>assistant I'm doing great!<|im_end|> ``` تعد أدوار "user" و "system" و "assistant" هي الأدوار القياسية للدردشة، ونوصي باستخدامها عندما يكون ذلك منطقيًا، خاصة إذا كنت تريد أن يعمل نموذجك بشكل جيد مع [`TextGenerationPipeline`]. ومع ذلك، فأنت لست مقيدًا بهذه الأدوار - فإن القوالب مرنة للغاية، ويمكن أن تكون أي سلسلة نصية دورًا. ## أريد إضافة بعض قوالب الدردشة! كيف أبدأ؟ إذا كان لديك أي نماذج دردشة، فيجب عليك تعيين الخاصية "tokenizer.chat_template" الخاصة بها واختبارها باستخدام [`~PreTrainedTokenizer.apply_chat_template`]، ثم رفع المجزىء اللغوي المُحدّث إلى Hub. ينطبق هذا حتى إذا لم تكن مالك النموذج - إذا كنت تستخدم نموذجًا بقالب دردشة فارغ، أو لا يزال يستخدم قالب الفئة الافتراضية، فيرجى فتح [طلب سحب](https://huggingface.co/docs/hub/repositories-pull-requests-discussions) إلى مستودع النموذج حتى يمكن تعيين الخاصية بشكل صحيح! بمجرد تعيين الخاصية، هذا كل شيء، لقد انتهيت! ستعمل "tokenizer.apply_chat_template" الآن بشكل صحيح لهذا النموذج، مما يعني أنها مدعومة أيضًا بشكل تلقائي في أماكن مثل "TextGenerationPipeline"! من خلال ضمان امتلاك النماذج لهذه الخاصية، يُمكننا التأكد من أن المجتمع بأكمله يستخدم القوة الكاملة للنماذج مفتوحة المصدر. لقد كانت عدم تطابق التنسيق تطارد المجال وأضرت الأداء بصمت لفترة طويلة جدًا - لقد حان الوقت لوضع حد لها! ## متقدم: نصائح لكتابة القوالب <Tip> أسهل طريقة للبدء في كتابة قوالب Jinja هي إلقاء نظرة على بعض القوالب الموجودة. يمكنك استخدام `print(tokenizer.chat_template)` لأي نموذج دردشة لمعرفة القالب الذي يستخدمه. بشكل عام، تحتوي النماذج التي تدعم استخدام الأدوات على قوالب أكثر تعقيدًا بكثير من النماذج الأخرى - لذلك عندما تبدأ للتو، فمن المحتمل أنها مثال سيئ للتعلم منه! يمكنك أيضًا إلقاء نظرة على [وثائق Jinja](https://jinja.palletsprojects.com/en/3.1.x/templates/#synopsis) للحصول على تفاصيل حول تنسيق Jinja العام وتركيبه. </Tip> تُطابق قوالب Jinja في `transformers` قوالب Jinja في أي مكان آخر. الشيء الرئيسي الذي يجب معرفته هو أن سجل الدردشة سيكون متاحًا داخل قالبك كمتغير يسمى `messages`. ستتمكن من الوصول إلى `messages` في قالبك تمامًا كما يمكنك في Python، مما يعني أنه يمكنك التكرار خلاله باستخدام `{% for message in messages %}` أو الوصول إلى رسائل فردية باستخدام `{{ messages[0] }}`، على سبيل المثال. يمكنك أيضًا استخدام النصائح التالية لكتابة قوالب Jinja نظيفة وفعالة: ### إقتطاع المسافات الفارغة بشكل افتراضي، ستطبع Jinja أي مسافات فارغة تأتي قبل أو بعد كتلة. يمكن أن يكون هذا مشكلة لقوالب الدردشة، والتي تريد عادةً أن تكون دقيقة جدًا مع المسافات! لتجنب ذلك، نوصي بشدة بكتابة قوالبك على النحو التالي: ``` {%- for message in messages %} {{- message['role'] + message['content'] }} {%- endfor %} ``` بدلاً من ذلك: ``` {% for message in messages %} {{ message['role'] + message['content'] }} {% endfor %} ``` سيؤدي إضافة "-" إلى إزالة أي مسافات تأتي قبل الكتلة. يبدو المثال الثاني عادية، ولكن قد يتم تضمين السطر الجديد والمسافة البادئة في المخرجات، وهو على الأرجح ليس ما تُريده! ### المتغيرات الخاصة داخل قالبك، سيكون لديك حق الوصول إلى العديد من المتغيرات الخاصة. أهمها هو `messages`، والذي يحتوي على سجل الدردشة كقائمة من قواميس الرسائل. ومع ذلك، هناك العديد من المتغيرات الأخرى. لن يتم استخدام كل متغير في كل قالب. المتغيرات الأكثر شيوعًا هي: - `tools` تحتوي على قائمة بالأدوات بتنسيق مخطط JSON. ستكون `None` أو غير مُعرّفة إذا لم يتم تمرير أي أدوات. - `documents` تحتوي على قائمة من المستندات بالتنسيق `{"title": "العنوان", "contents": "المحتويات"}`، تُستخدم للتوليد المُعزز بالاسترجاع. 
ستكون `None` أو غير مُعرّفة إذا لم يتم تمرير أي مستندات. - `add_generation_prompt` هي قيمة منطقية تكون `True` إذا طلب المستخدم مُطالبة توليد، و `False` بخلاف ذلك. إذا تم تعيين هذا، فيجب أن يُضيف قالبك رأس رسالة مساعد إلى نهاية المحادثة. إذا لم يكن لدى نموذجك رأس مُحدد لرسائل المساعد، فيمكنك تجاهل هذا العلم. - **الرموز الخاصة** مثل `bos_token` و `eos_token`. يتم استخراجها من `tokenizer.special_tokens_map`. ستختلف الرموز الدقيقة المتاحة داخل كل قالب اعتمادًا على المجزىء اللغوي الأصلي. <Tip> يمكنك في الواقع تمرير أي `kwarg` إلى `apply_chat_template`، وستكون متاحة داخل القالب كمتغير. بشكل عام، نوصي بمحاولة الالتزام بالمتغيرات الأساسية المذكورة أعلاه، لأن ذلك سيجعل نموذجك أكثر صعوبة في الاستخدام إذا كان على المستخدمين كتابة تعليمات برمجية مخصصة لتمرير `kwargs` خاصة بالنموذج. ومع ذلك، فنحن نُدرك أن هذا المجال يتحرك بسرعة، لذلك إذا كانت لديك حالة استخدام جديدة لا تتناسب مع واجهة برمجة التطبيقات الأساسية، فلا تتردد في استخدام `kwarg` معامل جديد لها! إذا أصبح `kwarg` المعامل الجديد شائعًا، فقد نقوم بترقيته إلى واجهة برمجة التطبيقات الأساسية وإنشاء وتوثيق الخاص به. </Tip> ### دوال قابلة للاستدعاء هناك أيضًا قائمة قصيرة من الدوال القابلة للاستدعاء المتاحة لك داخل قوالبك. هذه هي: - `raise_exception(msg)`: تُثير `TemplateException`. هذا مفيد لتصحيح الأخطاء، ولإخبار المستخدمين عندما يفعلون شيئًا لا يدعمه قالبك. - `strftime_now(format_str)`: تُكافئ `datetime.now().strftime(format_str)` في Python. يُستخدم هذا للحصول على التاريخ/الوقت الحالي بتنسيق مُحدد، والذي يتم تضمينه أحيانًا في رسائل النظام. ### التوافق مع Jinja غير Python هناك تطبيقات متعددة لـ Jinja بلغات مختلفة. عادة ما يكون لها نفس التركيب، ولكن الاختلاف الرئيسي هو أنه عند كتابة قالبًا في Python، يمكنك استخدام أساليب Python، مثل ".lower()" على السلاسل أو ".items()" على القواميس. سيؤدي هذا إلى كسر إذا حاول شخص ما استخدام قالبك في تنفيذ غير Python لـ Jinja. تعد التطبيقات غير Python شائعة بشكل خاص في بيئات النشر، حيث تعد JS و Rust شائعة جدًا. لا تقلق، على الرغم من ذلك! هناك بعض التغييرات البسيطة التي يمكنك إجراؤها على قوالبك لضمان توافقها عبر جميع تطبيقات Jinja: - استبدل أساليب Python بمرشحات Jinja. عادة ما يكون لها نفس الاسم، على سبيل المثال، يصبح "string.lower()" عبارة عن "string|lower"، ويصبح "dict.items()" عبارة عن "dict|items". أحد التغييرات الملحوظة هو أن "string.strip()" يصبح "string|trim". راجع [قائمة المرشحات المدمجة](https://jinja.palletsprojects.com/en/3.1.x/templates/#builtin-filters) في وثائق Jinja لمزيد من المعلومات. - استبدل "True" و "False" و "None"، وهي خاصة بـ Python، بـ "true" و "false" و "none". - قد يؤدي عرض قاموس أو قائمة مباشرة إلى نتائج مختلفة في التطبيقات الأخرى (على سبيل المثال، قد تتغير مدخﻻت السلسلة النصية من علامات اقتباس مفردة ' إلى علامات اقتباس مزدوجة "). يمكن أن يساعد إضافة "tojson" في ضمان الاتساق هنا. ## كتابة مطالبات التوليد لقد ذكرنا أعلاه أن add_generation_prompt هو متغير خاص يمكن الوصول إليه داخل قالبك، ويتحكم فيه المستخدم من خلال تعيين معامل add_generation_prompt. إذا كان نموذجك يتوقع عنوان لرسائل المساعد، فيجب أن يدعم قالبك إضافة العنوان عند تعيين add_generation_prompt. 
فيما يلي مثال على قالب يُنسّق الرسائل بأسلوب ChatML، مع دعم مُطالبة التوليد: ```text {{- bos_token }} {%- for message in messages %} {{- '<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n' }} {%- endfor %} {%- if add_generation_prompt %} {{- '<|im_start|>assistant\n' }} {%- endif %} ``` سيعتمد المحتوى الدقيق لعنوان المساعد على نموذجك المُحدد، ولكن يجب أن يكون دائمًا السلسلة النصية التي تُمثل بداية رسالة المساعد، بحيث إذا قام المستخدم بتطبيق قالبك باستخدام add_generation_prompt=True ثم قام بتوليد نص، سيكتب النموذج استجابة المساعد. لاحظ أيضًا أن بعض النماذج لا تحتاج إلى مُطالبة توليد، لأن رسائل المساعد تبدأ دائمًا فورًا بعد رسائل المستخدم. هذا شائع بشكل خاص لنماذج LLaMA و Mistral، حيث تبدأ رسائل المساعد فورًا بعد رمز [/INST] الذي ينهي رسائل المستخدم. في هذه الحالات، يمكن للقالب تجاهل معامل add_generation_prompt. مُطالبات التوليد مُهمة! إذا كان نموذجك يتطلب مُطالبة توليد ولكنها غير مُعيّنة في القالب، فمن المُحتمل أن تتدهور عمليات توليد النموذج بشدة، أو قد يُظهر النموذج سلوكًا غير عادي مثل متابعة رسالة المستخدم الأخيرة! ### كتابة قوالب أكبر وتصحيحها عندما تم تقديم هذه الميزة، كانت معظم القوالب صغيرة جدًا، أي ما يُعادل نص برمجي "من سطر واحد" في Jinja. ومع ذلك، مع النماذج والميزات الجديدة مثل استخدام الأدوات و RAG، يمكن أن يصل طول بعض القوالب إلى 100 سطر أو أكثر. عند كتابة قوالب كهذه، من الجيد كتابتها في ملف مُنفصل، باستخدام مُحرر نصوص. يمكنك بسهولة استخراج قالب دردشة إلى ملف: ```python open("template.jinja", "w").write(tokenizer.chat_template) ``` أو تحميل القالب المُحرر مرة أخرى إلى المعالج اللغوي: ```python tokenizer.chat_template = open("template.jinja").read() ``` كميزة إضافية، عندما تكتب قالبًا طويلاً متعدد الأسطر في ملف مُنفصل، ستتوافق أرقام الأسطر في هذا الملف تمامًا مع أرقام الأسطر في أخطاء تحليل القالب أو تنفيذه. سيُسهّل هذا كثيرًا تحديد مكان المشكلات. ### كتابة قوالب للأدوات على الرغم من أن قوالب الدردشة لا تفرض واجهة برمجة تطبيقات مُحددة للأدوات (أو لأي شيء حقًا)، فإننا نوصي مؤلفي القوالب بمحاولة الالتزام بواجهة برمجة تطبيقات قياسية حيثما أمكن. الهدف النهائي لقوالب الدردشة هو السماح بنقل التعليمات البرمجية عبر النماذج، لذا فإن الانحراف عن واجهة برمجة تطبيقات الأدوات القياسية يعني أن المستخدمين سيضطرون إلى كتابة تعليمات برمجية مخصصة لاستخدام الأدوات مع نموذجك. في بعض الأحيان يكون ذلك أمرًا لا مفر منه، ولكن غالبًا ما يكون من الممكن استخدام واجهة برمجة التطبيقات القياسية من خلال استخدام قوالب ذكية! أدناه، سنُدرج عناصر واجهة برمجة التطبيقات القياسية، ونقدم نصائح حول كتابة قوالب ستعمل بشكل جيد معها. #### تعريفات الأدوات يجب أن يتوقع قالبك أن يكون المتغير tools إما فارغًا (إذا لم يتم تمرير أي أدوات)، أو قائمة من قواميس مخطط JSON. تسمح أساليب قالب الدردشة الخاصة بنا للمستخدمين بتمرير الأدوات إما كمخطط JSON أو كدوال Python، ولكن عندما يتم تمرير الدوال، فإننا نقوم تلقائيًا بإنشاء مخطط JSON وتمريره إلى قالبك. نتيجة لذلك، سيكون متغير tools الذي يستقبله قالبك دائمًا قائمة من مخططات JSON. هنا مخطط JSON أداة نموذجي: ```json { "type": "function", "function": { "name": "multiply", "description": "دالة تضرب عددين", "parameters": { "type": "object", "properties": { "a": { "type": "number", "description": "الرقم الأول للضرب" }, "b": { "type": "number", "description": "الرقم الثاني للضرب" } }, "required": ["a", "b"] } } } ``` وهنا بعض الأمثلة البرمجية للتعامل مع الأدوات في قالب الدردشة الخاص بك. تذكر أن هذا مجرد مثال لتنسيق مُحدد - من المحتمل أن يحتاج نموذجك إلى تنسيق مختلف! 
```text {%- if tools %} {%- for tool in tools %} {{- '<tool>' + tool['function']['name'] + '\n' }} {%- for argument in tool['function']['parameters']['properties'] %} {{- argument + ': ' + tool['function']['parameters']['properties'][argument]['description'] + '\n' }} {%- endfor %} {{- '\n</tool>' }} {%- endif %} {%- endif %} ``` يجب بالطبع اختيار الرموز المحددة ووصف الأدوات التي يُعرضها قالبك لتتناسب مع تلك التي تم تدريب نموذجك عليها. لا يوجد شرط أن يفهم نموذجك مُدخلات مخطط JSON، فقط أن يتمكن قالبك من ترجمة مخطط JSON إلى تنسيق نموذجك. على سبيل المثال، تم تدريب Command-R باستخدام أدوات مُعرّفة باستخدام رؤوس دوال Python، ولكن يقبل قالب أداة Command-R مخطط JSON، ويُحوّل الأنواع داخليًا ويُعرض أدوات الإدخال كعناوين Python. يمكنك فعل الكثير باستخدام القوالب! #### استدعاءات الأدوات استدعاءات الأدوات، إذا كانت موجودة، ستكون قائمة مُرفقة برسالة بدور "assistant". لاحظ أن tool_calls هي دائمًا قائمة، على الرغم من أن معظم نماذج استدعاء الأدوات تدعم فقط استدعاءات أدوات فردية في كل مرة، مما يعني أن القائمة ستحتوي عادةً على عنصر واحد فقط. هنا قاموس رسالة نموذجي يحتوي على استدعاء أداة: ```json { "role": "assistant", "tool_calls": [ { "type": "function", "function": { "name": "multiply", "arguments": { "a": 5, "b": 6 } } } ] } ``` والنمط الشائع للتعامل معها سيكون كهذا: ```text {%- if message['role'] == 'assistant' and 'tool_calls' in message %} {%- for tool_call in message['tool_calls'] %} {{- '<tool_call>' + tool_call['function']['name'] + '\n' + tool_call['function']['arguments']|tojson + '\n</tool_call>' }} {%- endif %} {%- endfor %} {%- endif %} ``` مرة أخرى، يجب عليك عرض استدعاء الأداة بالتنسيق والرموز الخاصة التي يتوقعها نموذجك. #### استجابات الأدوات استجابات الأدوات لها تنسيق بسيط: إنها قاموس رسالة بدور "tool"، ومفتاح "name" يُعطي اسم الدالة المُستدعاة، ومفتاح "content" يحتوي على نتيجة استدعاء الأداة. هنا استجابة أداة نموذجية: ```json { "role": "tool", "name": "multiply", "content": "30" } ``` لست بحاجة إلى استخدام جميع المفاتيح في استجابة الأداة. على سبيل المثال، إذا كان نموذجك لا يتوقع تضمين اسم الدالة في استجابة الأداة، فيمكن أن يكون عرضها بسيطًا مثل: ```text {%- if message['role'] == 'tool' %} {{- "<tool_result>" + message['content'] + "</tool_result>" }} {%- endif %} ``` مرة أخرى، تذكر أن التنسيق الفعلي والرموز الخاصة خاصة بالنموذج - يجب أن تُولي عناية كبيرة لضمان أن الرموز والمسافات الفارغة وكل شيء آخر يتطابق تمامًا مع التنسيق الذي تم تدريب نموذجك عليه!
transformers/docs/source/ar/chat_templating.md/0
{ "file_path": "transformers/docs/source/ar/chat_templating.md", "repo_id": "transformers", "token_count": 33669 }
# Modular Transformers

The `transformers` library is an opinionated framework; our philosophy is defined in the [conceptual guide](./philosophy). At the core of that philosophy is the library's [single model, single file](https://huggingface.co/blog/transformers-design-philosophy) principle. The downside of this principle is that it restricts inheritance and the importing of components between files. As a result, model components are repeated across many files: `transformers` contains almost as many attention layers as it has models, and many of them are identical. This causes independent implementations to diverge as fixes and changes are applied to specific parts of the code.

To address this, we adopted the concept of "copies" in the library: by adding a comment indicating that a piece of code is a copy of another, we enforce through CI and local commands that the copies do not diverge. While simple, this process is tedious, and it adds to the burden on contributors, which is what we aim to move past. Model contributions usually require adding modeling code (~1,000 lines), a processor (~500 lines), tests, documentation, and so on. Model contributions rarely come in under 3,000–5,000 lines of code, most of which is boilerplate. This raises the bar for contributions, and with modular transformers we aim to lower that bar to an acceptable level.

## What is it?

Modular transformers introduces the notion of a "modular" file to a model folder. This modular file accepts code that is not normally accepted in modeling/processing files: it allows imports from neighbouring models as well as inheritance from one class to another. The modular file defines the models, processors, and configuration class that will be defined in their respective modules.

Finally, this feature introduces a new `linter`, which "unravels" the modular file into the "single model, single file" directory structure. These files are generated automatically every time the script is run, reducing the required contribution to the modular file, and therefore only to the changes between the contributed model and the other models.

Model users will still import and use the single-file interface, so no change is expected here. In doing this, we hope to combine the best of both worlds: enabling simple contributions while sticking to our philosophy.

This is therefore a replacement for the `# Copied from` markers, and previously contributed models can be expected to move to the new modular transformers format in the coming months.

### Details

The "linter" flattens the inheritance, generating all of the single files from the modular file while remaining transparent to Python users. At this time, the linter flattens a single level of inheritance. For example:
- If a configuration class inherits from another and adds/deletes an argument, the generated file will either reference it directly (in case of an addition) or remove it entirely (in case of a deletion).
- If a class inherits from another, for example `class GemmaModel(LlamaModel):`, the dependencies are inferred automatically. All submodules will be inferred automatically from the parent class.
- If you define new functions in the `modular` file and use them inside classes, the linter will infer that automatically.

You should be able to write everything (the tokenizer, the image processor, the model, the configuration) in the `modular` file, and the corresponding files will be generated automatically.

### Enforcement

[TODO] We are introducing a new test to make sure the generated content matches what is present in `modular_xxxx.py`.

### Examples

Here is a quick example with BERT and RoBERTa. The two models are closely related: their modeling implementations differ only in an embedding layer. Instead of redefining the model in full, here is what the `modular_roberta.py` file looks like for the modeling and configuration classes (for the sake of the example, the tokenizer is ignored at this time, as it is very different).
```python from torch import nn from ..bert.configuration_bert import BertConfig from ..bert.modeling_bert import ( BertModel, BertEmbeddings, BertForMaskedLM ) # تكوين RoBERTa مطابق لتكوين BERT class RobertaConfig(BertConfig): model_type = 'roberta' # نعيد تعريف الإضافات هنا لتسليط الضوء على اختلاف معرف الحشو، ونعيد تعريف الإضافات الموضعية class RobertaEmbeddings(BertEmbeddings): def __init__(self, config): super().__init__(config()) self.padding_idx = config.pad_token_id self.position_embeddings = nn.Embedding( config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx ) # نموذج RoBERTa مطابق لنموذج BERT، باستثناء طبقة الإضافات. # نعيد تعريف الإضافات أعلاه، لذا هنا لا توجد حاجة لعمل إضافي class RobertaModel(BertModel): def __init__(self, config): super().__init__(config) self.embeddings = RobertaEmbeddings(config) # الرؤوس الآن تحتاج فقط إلى إعادة تعريف النموذج داخل `RobertaModel` الصحيح class RobertaForMaskedLM(BertForMaskedLM): def __init__(self, config): super().__init__(config) self.model = RobertaModel(config) ``` لاحظ أنه إذا لم تستخدم الاعتماد الذي حددته، فستحصل على الخطأ التالي: ```bash ValueError: You defined `RobertaEmbeddings` in the modular_roberta.py, it should be used when you define `BertModel`, as it is one of it's direct dependencies. Make sure you use it in the `__init__` function. ``` بالإضافة إلى ذلك، قد تجد قائمة بالأمثلة هنا: ## ما هو ليس كذلك ليس بديلاً لتعليمات برمجة النمذجة (بعد؟)، وإذا لم يكن نموذجك يعتمد على أي شيء آخر موجود من قبل، فيمكنك إضافة ملف `نمذجة` كالعادة. ## الاستخدام المتقدم ### إزالة السمات والوظائف لإزالة السمات التي لا تستخدم في نموذجك النمطي، والتي لا تريد رؤيتها في النمذجة المفككة: ```python class GemmaModel(LlamaModel): | class GemmaModel(PreTrainedModel): def __init__(self, config): | def __init__(self, config): super().__init__(self, eos_token) | super().__init__(config) del self.embed_tokens | self.padding_idx = config.pad_token_id | self.vocab_size = config.vocab_size | | self.layers = nn.ModuleList( | [LlamaDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] | ) | self.norm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) | self.rotary_emb = LlamaRotaryEmbedding(config=config) | self.gradient_checkpointing = False | | # Initialize weights and apply final processing | self.post_init() ``` إذا قمت بالتحقق من `LlamaModel` الأصلي، فستجد `embed_tokens` الذي تمت إزالته هنا (كما هو متوقع!) إزالة وظيفة مشابهة، تحتاج فقط إلى كتابتها مع `raise ValueError("")` لمحاكاة السلوك الذي تريده فعليًا عند إزالة وظيفة أصلية في بايثون. ```python class GemmaTokenizer(LlamaTokenizer): ... def get_spm_processor(self): raise AttributeError("Not needed for Gemma") def unk_token_length(self): raise AttributeError("Not needed for Gemma") ``` ### تعريف وظائف جديدة إذا قمت بتعريف وظيفة جديدة في الملف `modular` لاستخدامها داخل فئة، على سبيل المثال ```python def my_new_function(*args, **kwargs): # Do something here pass class GemmaModel(LlamaModel): def forward(*args, **kwargs): # Call the function example = my_new_function(*args, **kwargs) # continue here ``` سيتم نسخ وظيفة `my_new_function` (وبشكل متكرر، أي وظائف أخرى جديدة يتم استدعاؤها في جسمها) تلقائيًا في الملف الذي يتم استخدامه. 
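As a rough illustration of the result, the sketch below approximates by hand what the generated single-file module could contain for the `my_new_function` example: the helper is emitted next to the class that calls it. This is not real linter output; the actual generated file is produced by the conversion script and contains the full inlined, Llama-derived implementation, so `nn.Module` here merely stands in for that code.

```python
from torch import nn


# The helper defined in the modular file is copied into the generated file verbatim...
def my_new_function(*args, **kwargs):
    # Do something here
    pass


class GemmaModel(nn.Module):  # stands in for the fully inlined, Llama-derived implementation
    def forward(self, *args, **kwargs):
        # ...so the method that uses it can call it without any cross-file import.
        example = my_new_function(*args, **kwargs)
        # the rest of the (inlined) forward pass would continue here
        return example
```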
### استدعاء `super()` قمنا مؤخرًا بشحن بعض الميزات التي تسمح لك بالانتقال من: ```python class GemmaTokenizer(LlamaTokenizer, PretrainedTokenizerFast): | class GemmaModel(nn.Module): def __init__(self, eos_token="</s>"): | def __init__(self): eos_token = AddedToken(eos_token) | eos_token = AddedToken(eos_token) PretrainedTokenizerFast.__init__(self, eos_token) | super().__init__(eos_token) ``` هذا مفيد عندما لا تريد تفكيك استدعاء `super()`، وتريد التمييز بين أي استدعاء super init تقوم به! ### التسمية الخاصة ندعم الآن أيضًا حالات خاصة مثل ```python class GemmaVisionModel(CLIPModel): pass ``` حيث اسم فئة `GemmaVision` الخاصة بك ليس هو نفسه `Gemma` النمطي. هذا مفيد للغاية للنماذج المركبة.
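Tying this back to the RoBERTa example at the top of this page, the sketch below hand-approximates what the linter's single-file output for `RobertaEmbeddings` could look like once the BERT code has been inlined: the cross-model import disappears and the class stands on its own. Only a trimmed `__init__` is shown and the attribute names are illustrative; the real generated file carries the complete BERT-derived implementation.

```python
from types import SimpleNamespace

from torch import nn


class RobertaEmbeddings(nn.Module):
    """Hand-written approximation of the inlined, generated class (illustrative only)."""

    def __init__(self, config):
        super().__init__()
        self.padding_idx = config.pad_token_id
        self.word_embeddings = nn.Embedding(
            config.vocab_size, config.hidden_size, padding_idx=self.padding_idx
        )
        # The override from the modular file: position embeddings that respect the padding index.
        self.position_embeddings = nn.Embedding(
            config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
        )
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)


# Tiny fake config, just to show the class instantiates without any BERT import.
config = SimpleNamespace(
    vocab_size=100,
    hidden_size=16,
    max_position_embeddings=32,
    pad_token_id=1,
    layer_norm_eps=1e-12,
    hidden_dropout_prob=0.1,
)
embeddings = RobertaEmbeddings(config)
print(embeddings.position_embeddings.weight.shape)  # torch.Size([32, 16])
```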
transformers/docs/source/ar/modular_transformers.md/0
{ "file_path": "transformers/docs/source/ar/modular_transformers.md", "repo_id": "transformers", "token_count": 6710 }
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # نمذجة اللغة المقنعة (Masked language modeling) [[open-in-colab]] <Youtube id="mqElG5QJWUg"/> تتنبأ نمذجة اللغة المقنعة برمز مقنع في تسلسل، ويمكن للنموذج الانتباه إلى الرموز بشكل ثنائي الاتجاه. هذا يعني أن النموذج لديه إمكانية الوصول الكاملة إلى الرموز الموجودة على اليسار واليمين. تعد نمذجة اللغة المقنعة ممتازة للمهام التي تتطلب فهمًا سياقيًا جيدًا لتسلسل كامل. BERT هو مثال على نموذج لغة مقنع. سيوضح لك هذا الدليل كيفية: 1. تكييف [DistilRoBERTa](https://huggingface.co/distilbert/distilroberta-base) على مجموعة فرعية [r/askscience](https://www.reddit.com/r/askscience/) من مجموعة بيانات [ELI5](https://huggingface.co/datasets/eli5). 2. استخدام نموذج المدرب الخاص بك للاستدلال. <Tip> لمعرفة جميع البنى والنسخ المتوافقة مع هذه المهمة، نوصي بالتحقق من [صفحة المهمة](https://huggingface.co/tasks/fill-mask) </Tip> قبل أن تبدأ، تأكد من تثبيت جميع المكتبات الضرورية: ```bash pip install transformers datasets evaluate ``` نحن نشجعك على تسجيل الدخول إلى حساب Hugging Face الخاص بك حتى تتمكن من تحميل ومشاركة نموذجك مع المجتمع. عندما تتم مطالبتك، أدخل رمزك لتسجيل الدخول: ```py >>> from huggingface_hub import notebook_login >>> notebook_login() ``` ## تحميل مجموعة بيانات ELI5 ابدأ بتحميل أول 5000 مثال من مجموعة بيانات [ELI5-Category](https://huggingface.co/datasets/eli5_category) باستخدام مكتبة 🤗 Datasets. سيعطيك هذا فرصة للتجربة والتأكد من أن كل شيء يعمل قبل قضاء المزيد من الوقت في التدريب على مجموعة البيانات الكاملة. ```py >>> from datasets import load_dataset >>> eli5 = load_dataset("eli5_category", split="train[:5000]") ``` قم بتقسيم مجموعة البيانات `train` إلى مجموعتي تدريب واختبار باستخدام الدالة [`~datasets.Dataset.train_test_split`]: ```py >>> eli5 = eli5.train_test_split(test_size=0.2) ``` ثم ألق نظرة على مثال: ```py >>> eli5["train"][0] {'q_id': '7h191n', 'title': 'What does the tax bill that was passed today mean? How will it affect Americans in each tax bracket?', 'selftext': '', 'category': 'Economics', 'subreddit': 'explainlikeimfive', 'answers': {'a_id': ['dqnds8l', 'dqnd1jl', 'dqng3i1', 'dqnku5x'], 'text': ["The tax bill is 500 pages long and there were a lot of changes still going on right to the end. It's not just an adjustment to the income tax brackets, it's a whole bunch of changes. As such there is no good answer to your question. The big take aways are: - Big reduction in corporate income tax rate will make large companies very happy. - Pass through rate change will make certain styles of business (law firms, hedge funds) extremely happy - Income tax changes are moderate, and are set to expire (though it's the kind of thing that might just always get re-applied without being made permanent) - People in high tax states (California, New York) lose out, and many of them will end up with their taxes raised.", 'None yet. 
It has to be reconciled with a vastly different house bill and then passed again.', 'Also: does this apply to 2017 taxes? Or does it start with 2018 taxes?', 'This article explains both the House and senate bills, including the proposed changes to your income taxes based on your income level. URL_0'], 'score': [21, 19, 5, 3], 'text_urls': [[], [], [], ['https://www.investopedia.com/news/trumps-tax-reform-what-can-be-done/']]}, 'title_urls': ['url'], 'selftext_urls': ['url']} ``` على الرغم من أن هذا قد يبدو كثيرًا، إلا أنك مهتم حقًا بحقل `text`. ما هو رائع حول مهام نمذجة اللغة هو أنك لا تحتاج إلى تسميات (تُعرف أيضًا باسم المهمة غير الخاضعة للإشراف) لأن الكلمة التالية *هي* التسمية. ## معالجة مسبقة (Preprocess) <Youtube id="8PmhEIXhBvI"/> بالنسبة لنمذجة اللغة المقنعة، فإن الخطوة التالية هي تحميل معالج DistilRoBERTa لمعالجة حقل `text` الفرعي: ```py >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilroberta-base") ``` ستلاحظ من المثال أعلاه، أن حقل `text` موجود بالفعل داخل `answers`. هذا يعني أنك ستحتاج إلى استخراج حقل `text` الفرعي من بنيته المضمنة باستخدام الدالة [`flatten`](https://huggingface.co/docs/datasets/process#flatten): ```py >>> eli5 = eli5.flatten() >>> eli5["train"][0] {'q_id': '7h191n', 'title': 'What does the tax bill that was passed today mean? How will it affect Americans in each tax bracket?', 'selftext': '', 'category': 'Economics', 'subreddit': 'explainlikeimfive', 'answers.a_id': ['dqnds8l', 'dqnd1jl', 'dqng3i1', 'dqnku5x'], 'answers.text': ["The tax bill is 500 pages long and there were a lot of changes still going on right to the end. It's not just an adjustment to the income tax brackets, it's a whole bunch of changes. As such there is no good answer to your question. The big take aways are: - Big reduction in corporate income tax rate will make large companies very happy. - Pass through rate change will make certain styles of business (law firms, hedge funds) extremely happy - Income tax changes are moderate, and are set to expire (though it's the kind of thing that might just always get re-applied without being made permanent) - People in high tax states (California, New York) lose out, and many of them will end up with their taxes raised.", 'None yet. It has to be reconciled with a vastly different house bill and then passed again.', 'Also: does this apply to 2017 taxes? Or does it start with 2018 taxes?', 'This article explains both the House and senate bills, including the proposed changes to your income taxes based on your income level. URL_0'], 'answers.score': [21, 19, 5, 3], 'answers.text_urls': [[], [], [], ['https://www.investopedia.com/news/trumps-tax-reform-what-can-be-done/']], 'title_urls': ['url'], 'selftext_urls': ['url']} ``` كل حقل فرعي هو الآن عمود منفصل كما هو موضح بواسطة بادئة `answers`، وحقل `text` هو قائمة الآن. بدلاً من معالجة كل جملة بشكل منفصل، قم بتحويل القائمة إلى سلسلة حتى تتمكن من معالجتها بشكل مشترك. هنا أول دالة معالجة مسبقة لربط قائمة السلاسل لكل مثال ومعالجة النتيجة: ```py >>> def preprocess_function(examples): ... return tokenizer([" ".join(x) for x in examples["answers.text"]]) ``` لتطبيق دالة المعالجة المسبقة على مجموعة البيانات بأكملها، استخدم الدالة 🤗 Datasets [`~datasets.Dataset.map`]. يمكنك تسريع دالة `map` عن طريق تعيين `batched=True` لمعالجة عدة عناصر في وقت واحد، وزيادة عدد العمليات باستخدام `num_proc`. احذف أي أعمدة غير ضرورية: ```py >>> tokenized_eli5 = eli5.map( ... preprocess_function, ... batched=True, ... num_proc=4, ... 
remove_columns=eli5["train"].column_names, ... ) ``` تحتوي مجموعة البيانات هذه على تسلسلات رمزية، ولكن بعضها أطول من الطول الأقصى للمدخلات للنموذج. يمكنك الآن استخدام دالة معالجة مسبقة ثانية لـ: - تجميع جميع التسلسلات - تقسيم التسلسلات المجمّعة إلى أجزاء أقصر محددة بـ `block_size`، والتي يجب أن تكون أقصر من الحد الأقصى لطول المدخلات ومناسبة لذاكرة GPU. ```py >>> block_size = 128 >>> def group_texts(examples): ... # تجميع جميع النصوص. ... concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()} ... total_length = len(concatenated_examples[list(examples.keys())[0]]) ... # نتجاهل الجزء المتبقي الصغير، يمكننا إضافة الحشو إذا كان النموذج يدعمه بدلاً من هذا الإسقاط، يمكنك ... # تخصيص هذا الجزء حسب احتياجاتك. ... if total_length >= block_size: ... total_length = (total_length // block_size) * block_size ... # تقسيمها إلى أجزاء بحجم block_size. ... result = { ... k: [t[i : i + block_size] for i in range(0, total_length, block_size)] ... for k, t in concatenated_examples.items() ... } ... return result ``` طبق دالة `group_texts` على مجموعة البيانات بأكملها: ```py >>> lm_dataset = tokenized_eli5.map(group_texts, batched=True, num_proc=4) ``` الآن، قم بإنشاء دفعة من الأمثلة باستخدام [`DataCollatorForLanguageModeling`]. من الأكثر كفاءة أن تقوم بـ *الحشو الديناميكي* ليصل طولها إلى أطول جملة في الدفعة أثناء التجميع، بدلاً من حشو مجموعة البيانات بأكملها إلى الطول الأقصى. <frameworkcontent> <pt> استخدم رمز نهاية التسلسل كرمز الحشو وحدد `mlm_probability` لحجب الرموز عشوائياً كل مرة تكرر فيها البيانات: ```py >>> from transformers import DataCollatorForLanguageModeling >>> tokenizer.pad_token = tokenizer.eos_token >>> data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=0.15) ``` </pt> <tf> استخدم رمز نهاية التسلسل كرمز الحشو وحدد `mlm_probability` لحجب الرموز عشوائياً كل مرة تكرر فيها البيانات: ```py >>> from transformers import DataCollatorForLanguageModeling >>> data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=0.15, return_tensors="tf") ``` </tf> </frameworkcontent> ## التدريب (Train) <frameworkcontent> <pt> <Tip> إذا لم تكن على دراية بتعديل نموذج باستخدام [`Trainer`], ألق نظرة على الدليل الأساسي [هنا](../training#train-with-pytorch-trainer)! </Tip> أنت مستعد الآن لبدء تدريب نموذجك! قم بتحميل DistilRoBERTa باستخدام [`AutoModelForMaskedLM`]: ```py >>> from transformers import AutoModelForMaskedLM >>> model = AutoModelForMaskedLM.from_pretrained("distilbert/distilroberta-base") ``` في هذه المرحلة، تبقى ثلاث خطوات فقط: 1. حدد معلمات التدريب الخاصة بك في [`TrainingArguments`]. المعلمة الوحيدة المطلوبة هي `output_dir` والتي تحدد مكان حفظ نموذجك. ستقوم بدفع هذا النموذج إلى Hub عن طريق تعيين `push_to_hub=True` (يجب أن تكون مسجلاً الدخول إلى Hugging Face لتحميل نموذجك). 2. قم بتمرير معلمات التدريب إلى [`Trainer`] مع النموذج، ومجموعات البيانات، ومجمّع البيانات. 3. قم باستدعاء [`~Trainer.train`] لتعديل نموذجك. ```py >>> training_args = TrainingArguments( ... output_dir="my_awesome_eli5_mlm_model", ... eval_strategy="epoch", ... learning_rate=2e-5, ... num_train_epochs=3, ... weight_decay=0.01, ... push_to_hub=True, ... ) >>> trainer = Trainer( ... model=model, ... args=training_args, ... train_dataset=lm_dataset["train"], ... eval_dataset=lm_dataset["test"], ... data_collator=data_collator, ... tokenizer=tokenizer, ... 
) >>> trainer.train() ``` بمجرد اكتمال التدريب، استخدم طريقة [`~transformers.Trainer.evaluate`] لتقييم النموذج والحصول على مقياس الحيرة: ```py >>> import math >>> eval_results = trainer.evaluate() >>> print(f"Perplexity: {math.exp(eval_results['eval_loss']):.2f}") Perplexity: 8.76 ``` ثم شارك نموذجك على Hub باستخدام طريقة [`~transformers.Trainer.push_to_hub`] حتى يتمكن الجميع من استخدام نموذجك: ```py >>> trainer.push_to_hub() ``` </pt> <tf> <Tip> إذا لم تكن على دراية بتعديل نموذج باستخدام Keras، ألق نظرة على الدليل الأساسي [هنا](../training#train-a-tensorflow-model-with-keras)! </Tip> لتعديل نموذج في TensorFlow، ابدأ بإعداد دالة محسن، وجدول معدل التعلم، وبعض معلمات التدريب: ```py >>> from transformers import create_optimizer, AdamWeightDecay >>> optimizer = AdamWeightDecay(learning_rate=2e-5, weight_decay_rate=0.01) ``` ثم يمكنك تحميل DistilRoBERTa باستخدام [`TFAutoModelForMaskedLM`]: ```py >>> from transformers import TFAutoModelForMaskedLM >>> model = TFAutoModelForMaskedLM.from_pretrained("distilbert/distilroberta-base") ``` قم بتحويل مجموعات بياناتك إلى تنسيق `tf.data.Dataset` باستخدام [`~transformers.TFPreTrainedModel.prepare_tf_dataset`]: ```py >>> tf_train_set = model.prepare_tf_dataset( ... lm_dataset["train"], ... shuffle=True, ... batch_size=16, ... collate_fn=data_collator, ... ) >>> tf_test_set = model.prepare_tf_dataset( ... lm_dataset["test"], ... shuffle=False, ... batch_size=16, ... collate_fn=data_collator, ... ) ``` قم بتهيئة النموذج للتدريب باستخدام [`compile`](https://keras.io/api/models/model_training_apis/#compile-method). لاحظ أن نماذج Transformers لديها جميعها دالة خسارة افتراضية ذات صلة بالمهمة، لذلك لا تحتاج إلى تحديد واحدة ما لم تكن تريد ذلك: ```py >>> import tensorflow as tf >>> model.compile(optimizer=optimizer) # لا توجد حجة للخسارة! ``` يمكن القيام بذلك عن طريق تحديد مكان دفع نموذجك ومعالج الرموز في [`~transformers.PushToHubCallback`]: ```py >>> from transformers.keras_callbacks import PushToHubCallback >>> callback = PushToHubCallback( ... output_dir="my_awesome_eli5_mlm_model", ... tokenizer=tokenizer, ... ) ``` أخيراً، أنت مستعد لبدء تدريب نموذجك! قم باستدعاء [`fit`](https://keras.io/api/models/model_training_apis/#fit-method) مع مجموعات بيانات التدريب والتحقق، وعدد العصور، والتعليقات الخاصة بك لتعديل النموذج: ```py >>> model.fit(x=tf_train_set, validation_data=tf_test_set, epochs=3, callbacks=[callback]) ``` بمجرد اكتمال التدريب، يتم تحميل نموذجك تلقائياً إلى Hub حتى يتمكن الجميع من استخدامه! </tf> </frameworkcontent> <Tip> لمثال أكثر تفصيلاً حول كيفية تعديل نموذج للنمذجة اللغوية المقنعة، ألق نظرة على الدفتر المقابل [دفتر PyTorch](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling.ipynb) أو [دفتر TensorFlow](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling-tf.ipynb). </Tip> ## الاستدلال رائع، الآن بعد أن قمت بتعديل نموذج، يمكنك استخدامه للاستدلال! جهّز بعض النصوص التي تريد أن يملأ النموذج الفراغات فيها، واستخدم الرمز الخاص `<mask>` للإشارة إلى الفراغ: ```py >>> text = "The Milky Way is a <mask> galaxy." ``` أبسط طريقة لتجربة نموذجك المعدل للاستدلال هي استخدامه في [`pipeline`]. قم بإنشاء كائن `pipeline` لملء الفراغ مع نموذجك، ومرر نصك إليه. 
إذا أردت، يمكنك استخدام معلمة `top_k` لتحديد عدد التنبؤات التي تريد إرجاعها: ```py >>> from transformers import pipeline >>> mask_filler = pipeline("fill-mask", "username/my_awesome_eli5_mlm_model") >>> mask_filler(text, top_k=3) [{'score': 0.5150994658470154, 'token': 21300, 'token_str': ' spiral', 'sequence': 'The Milky Way is a spiral galaxy.'}, {'score': 0.07087188959121704, 'token': 2232, 'token_str': ' massive', 'sequence': 'The Milky Way is a massive galaxy.'}, {'score': 0.06434620916843414, 'token': 650, 'token_str': ' small', 'sequence': 'The Milky Way is a small galaxy.'}] ``` <frameworkcontent> <pt> قم بتجزئة النص وإرجاع `input_ids` كمتجهات PyTorch. ستحتاج أيضًا إلى تحديد موضع رمز `<mask>`: ```py >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("username/my_awesome_eli5_mlm_model") >>> inputs = tokenizer(text, return_tensors="pt") >>> mask_token_index = torch.where(inputs["input_ids"] == tokenizer.mask_token_id)[1] ``` قم بتمرير المدخلات إلى النموذج وإرجاع `logits` للرمز المقنع: ```py >>> from transformers import AutoModelForMaskedLM >>> model = AutoModelForMaskedLM.from_pretrained("username/my_awesome_eli5_mlm_model") >>> logits = model(**inputs).logits >>> mask_token_logits = logits[0, mask_token_index, :] ``` ثم قم بإرجاع الرموز الثلاثة المقنعة ذات الاحتمالية الأعلى وطباعتها: ```py >>> top_3_tokens = torch.topk(mask_token_logits, 3, dim=1).indices[0].tolist() >>> for token in top_3_tokens: ... print(text.replace(tokenizer.mask_token, tokenizer.decode([token]))) The Milky Way is a spiral galaxy. The Milky Way is a massive galaxy. The Milky Way is a small galaxy. ``` </pt> <tf> قم بتقسيم النص إلى رموز وإرجاع `input_ids` كـ TensorFlow tensors. ستحتاج أيضًا إلى تحديد موضع رمز `<mask>`: ```py >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("username/my_awesome_eli5_mlm_model") >>> inputs = tokenizer(text, return_tensors="tf") >>> mask_token_index = tf.where(inputs["input_ids"] == tokenizer.mask_token_id)[0, 1] ``` قم بتمرير المدخلات إلى النموذج وإرجاع `logits` للرمز المقنع: ```py >>> from transformers import TFAutoModelForMaskedLM >>> model = TFAutoModelForMaskedLM.from_pretrained("username/my_awesome_eli5_mlm_model") >>> logits = model(**inputs).logits >>> mask_token_logits = logits[0, mask_token_index, :] ``` ثم قم بإرجاع الرموز الثلاثة المقنعة ذات الاحتمالية الأعلى وطباعتها: ```py >>> top_3_tokens = tf.math.top_k(mask_token_logits, 3).indices.numpy() >>> for token in top_3_tokens: ... print(text.replace(tokenizer.mask_token, tokenizer.decode([token]))) The Milky Way is a spiral galaxy. The Milky Way is a massive galaxy. The Milky Way is a small galaxy. ``` </tf> </frameworkcontent>
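For readers who want to sanity-check the preprocessing above without downloading anything, here is a self-contained rerun of the `group_texts` logic on made-up token IDs, with a deliberately tiny `block_size` so the chunking is easy to follow.

```python
block_size = 4  # tiny value purely for illustration; the guide above uses 128


def group_texts(examples):
    # Concatenate all token lists, then split into fixed-size blocks,
    # dropping the small remainder exactly as in the guide.
    concatenated = {k: sum(examples[k], []) for k in examples.keys()}
    total_length = len(concatenated[list(examples.keys())[0]])
    if total_length >= block_size:
        total_length = (total_length // block_size) * block_size
    return {
        k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
        for k, t in concatenated.items()
    }


# Two "tokenized" examples with made-up token IDs.
batch = {"input_ids": [[101, 7, 8, 9, 102], [101, 20, 21, 102]]}
print(group_texts(batch))
# {'input_ids': [[101, 7, 8, 9], [102, 101, 20, 21]]} -- the trailing token is dropped as a remainder
```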
transformers/docs/source/ar/tasks/masked_language_modeling.md/0
{ "file_path": "transformers/docs/source/ar/tasks/masked_language_modeling.md", "repo_id": "transformers", "token_count": 9368 }
- sections: - local: index title: 🤗 Transformers - local: quicktour title: Schnellstart - local: installation title: Installation title: Erste Schritte - sections: - local: pipeline_tutorial title: Pipelines für Inferenzen - local: autoclass_tutorial title: Laden von vortrainierten Instanzen mit einer AutoClass - local: preprocessing title: Vorverarbeiten - local: training title: Optimierung eines vortrainierten Modells - local: run_scripts title: Trainieren mit einem Skript - local: accelerate title: Verteiltes Training mit 🤗 Accelerate - local: peft title: Laden und Trainieren von Adaptern mit 🤗 PEFT - local: model_sharing title: Ein Modell teilen - local: transformers_agents title: Agents - local: llm_tutorial title: Generation with LLMs title: Tutorials - sections: - local: contributing title: Wie kann man zu 🤗 Transformers beitragen? - local: add_new_model title: Wie fügt man ein Modell zu 🤗 Transformers hinzu? - local: add_new_pipeline title: Wie fügt man eine Pipeline zu 🤗 Transformers hinzu? - local: testing title: Testen - local: pr_checks title: Überprüfung einer Pull Request title: Contribute
transformers/docs/source/de/_toctree.yml/0
{ "file_path": "transformers/docs/source/de/_toctree.yml", "repo_id": "transformers", "token_count": 445 }
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Testen Werfen wir einen Blick darauf, wie 🤗 Transformers-Modelle getestet werden und wie Sie neue Tests schreiben und die vorhandenen verbessern können. Es gibt 2 Testsuiten im Repository: 1. `tests` -- Tests für die allgemeine API 2. `examples` -- Tests hauptsächlich für verschiedene Anwendungen, die nicht Teil der API sind ## Wie Transformatoren getestet werden 1. Sobald ein PR eingereicht wurde, wird er mit 9 CircleCi Jobs getestet. Jeder neue Commit zu diesem PR wird erneut getestet. Diese Aufträge sind in dieser [Konfigurationsdatei](https://github.com/huggingface/transformers/tree/main/.circleci/config.yml) definiert, so dass Sie bei Bedarf die gleiche Umgebung auf Ihrem Rechner reproduzieren können. Umgebung auf Ihrem Rechner reproduzieren können. Diese CI-Jobs führen keine `@slow`-Tests durch. 2. Es gibt 3 Jobs, die von [github actions](https://github.com/huggingface/transformers/actions) ausgeführt werden: - [torch hub integration](https://github.com/huggingface/transformers/tree/main/.github/workflows/github-torch-hub.yml): prüft, ob die torch hub Integration funktioniert. - [self-hosted (push)](https://github.com/huggingface/transformers/tree/main/.github/workflows/self-push.yml): führt schnelle Tests auf der GPU nur bei Commits auf `main`. Es wird nur ausgeführt, wenn ein Commit auf `main` den Code in einem der folgenden Ordner aktualisiert hat: `src`, `tests`, `.github` (um zu verhindern, dass er auf hinzugefügten Modellkarten, Notebooks usw. läuft) - [self-hosted runner](https://github.com/huggingface/transformers/tree/main/.github/workflows/self-scheduled.yml): führt normale und langsame Tests auf GPU in `tests` und `examples`: ```bash RUN_SLOW=1 pytest tests/ RUN_SLOW=1 pytest examples/ ``` Die Ergebnisse können Sie [hier](https://github.com/huggingface/transformers/actions) sehen. ## Tests ausführen ### Auswahl der auszuführenden Tests In diesem Dokument wird ausführlich erläutert, wie Tests ausgeführt werden können. Wenn Sie nach der Lektüre noch mehr Details benötigen finden Sie diese [hier](https://docs.pytest.org/en/latest/usage.html). Hier sind einige der nützlichsten Möglichkeiten, Tests auszuführen. Alle ausführen: ```console pytest ``` oder: ```bash make test ``` Beachten Sie, dass Letzteres wie folgt definiert ist: ```bash python -m pytest -n auto --dist=loadfile -s -v ./tests/ ``` was pytest anweist: - so viele Testprozesse laufen zu lassen, wie es CPU-Kerne gibt (was zu viele sein könnten, wenn Sie nicht über eine Menge RAM verfügen!) 
- sicherzustellen, dass alle Tests aus derselben Datei von demselben Testprozess ausgeführt werden - Erfassen Sie keine Ausgaben - im ausführlichen Modus laufen lassen ### Abrufen der Liste aller Tests Alle Tests der Testsuite: ```bash pytest --collect-only -q ``` Alle Tests einer bestimmten Testdatei: ```bash pytest tests/test_optimization.py --collect-only -q ``` ### Führen Sie ein bestimmtes Testmodul aus Um ein einzelnes Testmodul auszuführen: ```bash pytest tests/utils/test_logging.py ``` ### Spezifische Tests ausführen Da unittest in den meisten Tests verwendet wird, müssen Sie, um bestimmte Untertests auszuführen, den Namen der unittest Klasse, die diese Tests enthält. Er könnte zum Beispiel lauten: ```bash pytest tests/test_optimization.py::OptimizationTest::test_adam_w ``` Hier: - `tests/test_optimization.py` - die Datei mit den Tests - `OptimizationTest` - der Name der Klasse - `test_adam_w` - der Name der spezifischen Testfunktion Wenn die Datei mehrere Klassen enthält, können Sie auswählen, dass nur die Tests einer bestimmten Klasse ausgeführt werden sollen. Zum Beispiel: ```bash pytest tests/test_optimization.py::OptimizationTest ``` führt alle Tests innerhalb dieser Klasse aus. Wie bereits erwähnt, können Sie sehen, welche Tests in der Klasse `OptimizationTest` enthalten sind, indem Sie sie ausführen: ```bash pytest tests/test_optimization.py::OptimizationTest --collect-only -q ``` Sie können Tests mit Hilfe von Schlüsselwortausdrücken ausführen. Um nur Tests auszuführen, deren Name `adam` enthält: ```bash pytest -k adam tests/test_optimization.py ``` Die logischen `und` und `oder` können verwendet werden, um anzugeben, ob alle Schlüsselwörter übereinstimmen sollen oder nur eines. `nicht` kann verwendet werden, um negieren. Um alle Tests auszuführen, außer denen, deren Name `adam` enthält: ```bash pytest -k "not adam" tests/test_optimization.py ``` Und Sie können die beiden Muster in einem kombinieren: ```bash pytest -k "ada and not adam" tests/test_optimization.py ``` Um zum Beispiel sowohl `test_adafactor` als auch `test_adam_w` auszuführen, können Sie verwenden: ```bash pytest -k "test_adam_w or test_adam_w" tests/test_optimization.py ``` Beachten Sie, dass wir hier `oder` verwenden, da wir wollen, dass eines der Schlüsselwörter übereinstimmt, um beide einzuschließen. Wenn Sie nur Tests einschließen möchten, die beide Muster enthalten, müssen Sie `und` verwenden: ```bash pytest -k "test and ada" tests/test_optimization.py ``` ### Führen Sie `accelerate` Tests durch Manchmal müssen Sie `accelerate` Tests für Ihre Modelle ausführen. Dazu fügen Sie einfach `-m accelerate_tests` zu Ihrem Befehl hinzu, wenn Sie diese Tests bei einem `OPT`-Lauf ausführen möchten: ```bash RUN_SLOW=1 pytest -m accelerate_tests tests/models/opt/test_modeling_opt.py ``` ### Dokumentationstests ausführen Um zu testen, ob die Dokumentationsbeispiele korrekt sind, sollten Sie überprüfen, ob die `doctests` erfolgreich sind. 
Lassen Sie uns als Beispiel den docstring von [WhisperModel.forward](https://github.com/huggingface/transformers/blob/main/src/transformers/models/whisper/modeling_whisper.py#L1017-L1035) verwenden: ```python r""" Returns: Example: ```python >>> import torch >>> from transformers import WhisperModel, WhisperFeatureExtractor >>> from datasets import load_dataset >>> model = WhisperModel.from_pretrained("openai/whisper-base") >>> feature_extractor = WhisperFeatureExtractor.from_pretrained("openai/whisper-base") >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> inputs = feature_extractor(ds[0]["audio"]["array"], return_tensors="pt") >>> input_features = inputs.input_features >>> decoder_input_ids = torch.tensor([[1, 1]]) * model.config.decoder_start_token_id >>> last_hidden_state = model(input_features, decoder_input_ids=decoder_input_ids).last_hidden_state >>> list(last_hidden_state.shape) [1, 2, 512] ```""" ``` Führen Sie einfach die folgende Zeile aus, um automatisch jedes docstring-Beispiel in der gewünschten Datei zu testen: ```bash pytest --doctest-modules <path_to_file_or_dir> ``` Wenn die Datei eine Markdown-Erweiterung hat, sollten Sie das Argument `--doctest-glob="*.md"` hinzufügen. ### Nur geänderte Tests ausführen Mit [pytest-picked](https://github.com/anapaulagomes/pytest-picked) können Sie die Tests ausführen, die sich auf die unstaged Dateien oder den aktuellen Zweig (gemäß Git) beziehen. Auf diese Weise können Sie schnell testen, ob Ihre Änderungen nichts kaputt gemacht haben. nichts kaputt gemacht haben, da die Tests für Dateien, die Sie nicht verändert haben, nicht ausgeführt werden. ```bash pip install pytest-picked ``` ```bash pytest --picked ``` Alle Tests werden von Dateien und Ordnern ausgeführt, die geändert, aber noch nicht übergeben wurden. ### Fehlgeschlagene Tests bei Änderung der Quelle automatisch wiederholen [pytest-xdist](https://github.com/pytest-dev/pytest-xdist) bietet eine sehr nützliche Funktion zur Erkennung aller fehlgeschlagenen Tests zu erkennen und dann darauf zu warten, dass Sie Dateien ändern, um die fehlgeschlagenen Tests so lange zu wiederholen, bis sie erfolgreich sind, während Sie die sie reparieren. So müssen Sie pytest nicht erneut starten, nachdem Sie die Korrektur vorgenommen haben. Dies wird so lange wiederholt, bis alle Tests bestanden sind. Danach wird erneut ein vollständiger Durchlauf durchgeführt. ```bash pip install pytest-xdist ``` So rufen Sie den Modus auf: `pytest -f` oder `pytest --looponfail` Datei-Änderungen werden erkannt, indem die Wurzelverzeichnisse von `looponfailroots` und alle ihre Inhalte (rekursiv) untersucht werden. Wenn die Vorgabe für diesen Wert für Sie nicht funktioniert, können Sie ihn in Ihrem Projekt ändern, indem Sie eine Konfigurations Option in der Datei `setup.cfg` ändern: ```ini [tool:pytest] looponfailroots = transformers tests ``` oder die Dateien `pytest.ini`/`tox.ini``: ```ini [pytest] looponfailroots = transformers tests ``` Dies würde dazu führen, dass nur nach Dateiänderungen in den jeweiligen Verzeichnissen gesucht wird, die relativ zum Verzeichnis der ini-Datei angegeben sind. Verzeichnis. [pytest-watch](https://github.com/joeyespo/pytest-watch) ist eine alternative Implementierung dieser Funktionalität. ### Überspringen eines Testmoduls Wenn Sie alle Testmodule ausführen möchten, mit Ausnahme einiger weniger, können Sie diese ausschließen, indem Sie eine explizite Liste der auszuführenden Tests angeben. 
Für Beispiel: Um alle Tests außer `test_modeling_*.py` auszuführen: ```bash pytest *ls -1 tests/*py | grep -v test_modeling* ``` ### Status leeren CI-Builds und wenn Isolation wichtig ist (gegen Geschwindigkeit), sollte der Cache geleert werden: ```bash pytest --cache-clear tests ``` ### Tests parallel ausführen Wie bereits erwähnt, führt `make test` über das Plugin `pytest-xdist` Tests parallel aus (Argument `-n X`, z.B. `-n 2` um 2 Jobs parallel laufen zu lassen). Mit der Option `--dist=` von `pytest-xdist` können Sie steuern, wie die Tests gruppiert werden. Mit `--dist=loadfile` werden die Tests, die sich in einer Datei befinden, in denselben Prozess. Da die Reihenfolge der ausgeführten Tests unterschiedlich und nicht vorhersehbar ist, kann die Ausführung der Testsuite mit `pytest-xdist` zu Fehlern führt (was bedeutet, dass wir einige unentdeckte gekoppelte Tests haben), verwenden Sie [pytest-replay](https://github.com/ESSS/pytest-replay), um die Tests in der gleichen Reihenfolge abzuspielen, was dabei helfen sollte diese fehlgeschlagene Sequenz auf ein Minimum zu reduzieren. ### Testreihenfolge und Wiederholung Es ist gut, die Tests mehrmals zu wiederholen, nacheinander, zufällig oder in Gruppen, um mögliche Abhängigkeiten und zustandsbezogene Fehler zu erkennen (Abriss). Und die einfache, mehrfache Wiederholung ist einfach gut, um einige Probleme zu erkennen, die durch die Zufälligkeit von DL aufgedeckt werden. #### Wiederholungstests - [pytest-flakefinder](https://github.com/dropbox/pytest-flakefinder): ```bash pip install pytest-flakefinder ``` Und führen Sie dann jeden Test mehrmals durch (standardmäßig 50): ```bash pytest --flake-finder --flake-runs=5 tests/test_failing_test.py ``` <Tip> Dieses Plugin funktioniert nicht mit dem `-n` Flag von `pytest-xdist`. </Tip> <Tip> Es gibt noch ein anderes Plugin `pytest-repeat`, aber es funktioniert nicht mit `unittest`. </Tip> #### Run tests in a random order ```bash pip install pytest-random-order ``` Wichtig: Das Vorhandensein von `pytest-random-order` sorgt für eine automatische Zufallsanordnung der Tests, es sind keine Konfigurationsänderungen oder Befehlszeilenoptionen sind nicht erforderlich. Wie bereits erläutert, ermöglicht dies die Erkennung von gekoppelten Tests - bei denen der Zustand eines Tests den Zustand eines anderen beeinflusst. Wenn `pytest-random-order` installiert ist, gibt es den Zufallswert aus, der für diese Sitzung verwendet wurde, z.B: ```bash pytest tests [...] Using --random-order-bucket=module Using --random-order-seed=573663 ``` Wenn eine bestimmte Sequenz fehlschlägt, können Sie sie reproduzieren, indem Sie genau diesen Seed hinzufügen, z.B: ```bash pytest --random-order-seed=573663 [...] Using --random-order-bucket=module Using --random-order-seed=573663 ``` Es wird nur dann die exakte Reihenfolge reproduzieren, wenn Sie genau dieselbe Liste von Tests (oder gar keine Liste) verwenden. Sobald Sie beginnen, die Liste die Liste manuell einzugrenzen, können Sie sich nicht mehr auf den Seed verlassen, sondern müssen die Tests manuell in der genauen Reihenfolge auflisten auflisten und pytest anweisen, sie nicht zu randomisieren, indem Sie `--random-order-bucket=none` verwenden, z.B.: ```bash pytest --random-order-bucket=none tests/test_a.py tests/test_c.py tests/test_b.py ``` So deaktivieren Sie das Shuffling für alle Tests: ```bash pytest --random-order-bucket=none ``` Standardmäßig ist `--random-order-bucket=module` impliziert, wodurch die Dateien auf den Modulebenen gemischt werden. 
Es kann auch auf den Ebenen `class`, `package`, `global` und `none` mischen. Die vollständigen Details entnehmen Sie bitte der [Dokumentation](https://github.com/jbasko/pytest-random-order). Eine weitere Alternative zur Randomisierung ist: [`pytest-random`](https://github.com/pytest-dev/pytest-randomly). Dieses Modul hat eine sehr ähnliche Funktionalität/Schnittstelle, aber es hat nicht die Eimermodi, die in `pytest-random-order` zur Verfügung. Es hat das gleiche Problem, dass es sich nach der Installation aufdrängt. ### Variationen von Aussehen und Bedienung #### pytest-zucker [pytest-sugar](https://github.com/Frozenball/pytest-sugar) ist ein Plugin, das das Erscheinungsbild verbessert, eine Fortschrittsbalken hinzufügt und Tests, die fehlschlagen, sowie die Bestätigung sofort anzeigt. Es wird bei der Installation automatisch aktiviert. ```bash pip install pytest-sugar ``` Um Tests ohne sie durchzuführen, führen Sie aus: ```bash pytest -p no:sugar ``` oder deinstallieren Sie es. #### Melden Sie den Namen jedes Subtests und seinen Fortschritt Für einen einzelnen oder eine Gruppe von Tests über `pytest` (nach `pip install pytest-pspec`): ```bash pytest --pspec tests/test_optimization.py ``` #### Zeigt fehlgeschlagene Tests sofort an [pytest-instafail](https://github.com/pytest-dev/pytest-instafail) zeigt Fehlschläge und Fehler sofort an, anstatt bis zum Ende der Testsitzung zu warten. ```bash pip install pytest-instafail ``` ```bash pytest --instafail ``` ### Zu GPU oder nicht zu GPU Bei einem GPU-aktivierten Setup fügen Sie zum Testen im reinen CPU-Modus `CUDA_VISIBLE_DEVICES=""` hinzu: ```bash CUDA_VISIBLE_DEVICES="" pytest tests/utils/test_logging.py ``` oder wenn Sie mehrere Grafikprozessoren haben, können Sie angeben, welcher von `pytest` verwendet werden soll. Wenn Sie zum Beispiel nur den zweiten Grafikkarte zu verwenden, wenn Sie die Grafikkarten `0` und `1` haben, können Sie folgendes ausführen: ```bash CUDA_VISIBLE_DEVICES="1" pytest tests/utils/test_logging.py ``` Dies ist praktisch, wenn Sie verschiedene Aufgaben auf verschiedenen GPUs ausführen möchten. Einige Tests müssen nur auf der CPU ausgeführt werden, andere entweder auf der CPU, der GPU oder der TPU und wieder andere auf mehreren GPUs. Die folgenden skip Dekorateure werden verwendet, um die Anforderungen von Tests in Bezug auf CPU/GPU/TPU festzulegen: - `require_torch` - dieser Test wird nur unter Torch ausgeführt - `require_torch_gpu` - wie `require_torch` plus erfordert mindestens 1 GPU - `require_torch_multi_gpu` - wie `require_torch` und zusätzlich mindestens 2 GPUs erforderlich - `require_torch_non_multi_gpu` - wie `require_torch` plus benötigt 0 oder 1 GPUs - `require_torch_up_to_2_gpus` - wie `require_torch` plus erfordert 0 oder 1 oder 2 GPUs - `require_torch_xla` - wie `require_torch` plus erfordert mindestens 1 TPU Lassen Sie uns die GPU-Anforderungen in der folgenden Tabelle darstellen: | n gpus | decorator | |--------|--------------------------------| | `>= 0` | `@require_torch` | | `>= 1` | `@require_torch_gpu` | | `>= 2` | `@require_torch_multi_gpu` | | `< 2` | `@require_torch_non_multi_gpu` | | `< 3` | `@require_torch_up_to_2_gpus` | Hier ist zum Beispiel ein Test, der nur ausgeführt werden muss, wenn 2 oder mehr GPUs verfügbar sind und pytorch installiert ist: ```python no-style @require_torch_multi_gpu def test_example_with_multi_gpu(): ``` Wenn ein Test `tensorflow` benötigt, verwenden Sie den Dekorator `require_tf`. 
Zum Beispiel: ```python no-style @require_tf def test_tf_thing_with_tensorflow(): ``` Diese Dekors können gestapelt werden. Wenn zum Beispiel ein Test langsam ist und mindestens eine GPU unter pytorch benötigt, können Sie wie Sie ihn einrichten können: ```python no-style @require_torch_gpu @slow def test_example_slow_on_gpu(): ``` Einige Dekoratoren wie `@parametrized` schreiben Testnamen um, daher müssen `@require_*`-Sprungdekoratoren als letztes aufgeführt werden. zuletzt aufgeführt werden, damit sie korrekt funktionieren. Hier ist ein Beispiel für die korrekte Verwendung: ```python no-style @parameterized.expand(...) @require_torch_multi_gpu def test_integration_foo(): ``` Dieses Problem mit der Reihenfolge gibt es bei `@pytest.mark.parametrize` nicht, Sie können es an den Anfang oder an den Schluss setzen und es wird trotzdem funktionieren. funktionieren. Aber es funktioniert nur bei Nicht-Unittests. Innerhalb von Tests: - Wie viele GPUs sind verfügbar: ```python from transformers.testing_utils import get_gpu_count n_gpu = get_gpu_count() # works with torch and tf ``` ### Testen mit einem bestimmten PyTorch-Backend oder Gerät Um die Testsuite auf einem bestimmten Torch-Gerät auszuführen, fügen Sie `TRANSFORMERS_TEST_DEVICE="$Gerät"` hinzu, wobei `$Gerät` das Ziel-Backend ist. Zum Beispiel, um nur auf der CPU zu testen: ```bash TRANSFORMERS_TEST_DEVICE="cpu" pytest tests/utils/test_logging.py ``` Diese Variable ist nützlich, um benutzerdefinierte oder weniger verbreitete PyTorch-Backends wie `mps` zu testen. Sie kann auch verwendet werden, um den gleichen Effekt wie `CUDA_VISIBLE_DEVICES` zu erzielen, indem Sie bestimmte GPUs anvisieren oder im reinen CPU-Modus testen. Bestimmte Geräte erfordern einen zusätzlichen Import, nachdem Sie `torch` zum ersten Mal importiert haben. Dies kann über die Umgebungsvariable `TRANSFORMERS_TEST_BACKEND` festgelegt werden: ```bash TRANSFORMERS_TEST_BACKEND="torch_npu" pytest tests/utils/test_logging.py ``` ### Verteiltes Training `pytest` kann nicht direkt mit verteiltem Training umgehen. Wenn dies versucht wird, tun die Unterprozesse nicht das Richtige und denken am Ende, sie seien `pytest` und beginnen, die Testsuite in Schleifen auszuführen. Es funktioniert jedoch, wenn man einen normalen Prozess erzeugt, der dann mehrere Worker erzeugt und die IO-Pipes verwaltet. Hier sind einige Tests, die dies verwenden: - [test_trainer_distributed.py](https://github.com/huggingface/transformers/tree/main/tests/trainer/test_trainer_distributed.py) - [test_deepspeed.py](https://github.com/huggingface/transformers/tree/main/tests/deepspeed/test_deepspeed.py) Um direkt mit der Ausführung zu beginnen, suchen Sie in diesen Tests nach dem Aufruf `execute_subprocess_async`. Sie benötigen mindestens 2 GPUs, um diese Tests in Aktion zu sehen: ```bash CUDA_VISIBLE_DEVICES=0,1 RUN_SLOW=1 pytest -sv tests/test_trainer_distributed.py ``` ### Erfassung von Ausgaben Während der Testausführung werden alle Ausgaben, die an `stdout` und `stderr` gesendet werden, aufgezeichnet. Wenn ein Test oder eine Setup-Methode fehlschlägt, wird die wird die entsprechende aufgezeichnete Ausgabe in der Regel zusammen mit dem Fehler-Traceback angezeigt. Um die Aufzeichnung von Ausgaben zu deaktivieren und `stdout` und `stderr` normal zu erhalten, verwenden Sie `-s` oder `--capture=no`: ```bash pytest -s tests/utils/test_logging.py ``` So senden Sie Testergebnisse an die JUnit-Formatausgabe: ```bash py.test tests --junitxml=result.xml ``` ### Farbsteuerung Keine Farbe zu haben (z.B. 
gelb auf weißem Hintergrund ist nicht lesbar): ```bash pytest --color=no tests/utils/test_logging.py ``` ### Testbericht an den Online-Dienst pastebin senden Erstellen Sie eine URL für jeden Testfehler: ```bash pytest --pastebin=failed tests/utils/test_logging.py ``` Dadurch werden Informationen über den Testlauf an einen entfernten Paste-Dienst übermittelt und eine URL für jeden Fehlschlag bereitgestellt. Sie können die Tests wie gewohnt auswählen oder z.B. -x hinzufügen, wenn Sie nur einen bestimmten Fehler senden möchten. Erstellen einer URL für ein ganzes Testsitzungsprotokoll: ```bash pytest --pastebin=all tests/utils/test_logging.py ``` ## Tests schreiben 🤗 Die Tests von Transformers basieren auf `unittest`, werden aber von `pytest` ausgeführt, so dass die meiste Zeit Funktionen aus beiden Systemen verwendet werden können. Sie können [hier](https://docs.pytest.org/en/stable/unittest.html) nachlesen, welche Funktionen unterstützt werden, aber das Wichtigste ist Wichtig ist, dass die meisten `pytest`-Fixtures nicht funktionieren. Auch die Parametrisierung nicht, aber wir verwenden das Modul `parametrisiert`, das auf ähnliche Weise funktioniert. ### Parametrisierung Oft besteht die Notwendigkeit, denselben Test mehrmals auszuführen, aber mit unterschiedlichen Argumenten. Das könnte innerhalb des Tests geschehen des Tests gemacht werden, aber dann gibt es keine Möglichkeit, den Test mit nur einem Satz von Argumenten auszuführen. ```python # test_this1.py import unittest from parameterized import parameterized class TestMathUnitTest(unittest.TestCase): @parameterized.expand( [ ("negative", -1.5, -2.0), ("integer", 1, 1.0), ("large fraction", 1.6, 1), ] ) def test_floor(self, name, input, expected): assert_equal(math.floor(input), expected) ``` Nun wird dieser Test standardmäßig 3 Mal ausgeführt, wobei jedes Mal die letzten 3 Argumente von `test_floor` den entsprechenden Argumenten in der Parameterliste zugeordnet werden. die entsprechenden Argumente in der Parameterliste. Sie können auch nur die Parameter `negativ` und `ganzzahlig` mit ausführen: ```bash pytest -k "negative and integer" tests/test_mytest.py ``` oder alle Untertests außer `negativ`, mit: ```bash pytest -k "not negative" tests/test_mytest.py ``` Neben der Verwendung des gerade erwähnten Filters `-k` können Sie auch den genauen Namen jedes Untertests herausfinden und jeden oder alle unter Verwendung ihrer genauen Namen ausführen. ```bash pytest test_this1.py --collect-only -q ``` und es wird aufgelistet: ```bash test_this1.py::TestMathUnitTest::test_floor_0_negative test_this1.py::TestMathUnitTest::test_floor_1_integer test_this1.py::TestMathUnitTest::test_floor_2_large_fraction ``` Jetzt können Sie also nur 2 spezifische Untertests durchführen: ```bash pytest test_this1.py::TestMathUnitTest::test_floor_0_negative test_this1.py::TestMathUnitTest::test_floor_1_integer ``` Das Modul [parametrisiert](https://pypi.org/project/parameterized/), das sich bereits in den Entwickler-Abhängigkeiten befindet von `transformers` befindet, funktioniert sowohl für `unittests` als auch für `pytest` Tests. Wenn es sich bei dem Test jedoch nicht um einen `Unittest` handelt, können Sie `pytest.mark.parametrize` verwenden (oder Sie können sehen, dass es in einigen bestehenden Tests verwendet wird, meist unter `Beispiele`). 
Hier ist das gleiche Beispiel, diesmal unter Verwendung der `parametrize`-Markierung von `pytest`: ```python # test_this2.py import pytest @pytest.mark.parametrize( "name, input, expected", [ ("negative", -1.5, -2.0), ("integer", 1, 1.0), ("large fraction", 1.6, 1), ], ) def test_floor(name, input, expected): assert_equal(math.floor(input), expected) ``` Genau wie bei `parametrisiert` können Sie mit `pytest.mark.parametrize` genau steuern, welche Subtests ausgeführt werden ausgeführt werden, wenn der Filter `-k` nicht ausreicht. Allerdings erzeugt diese Parametrisierungsfunktion einen etwas anderen Satz von Namen für die Untertests. Sie sehen folgendermaßen aus: ```bash pytest test_this2.py --collect-only -q ``` und es wird aufgelistet: ```bash test_this2.py::test_floor[integer-1-1.0] test_this2.py::test_floor[negative--1.5--2.0] test_this2.py::test_floor[large fraction-1.6-1] ``` Jetzt können Sie also nur den spezifischen Test durchführen: ```bash pytest test_this2.py::test_floor[negative--1.5--2.0] test_this2.py::test_floor[integer-1-1.0] ``` wie im vorherigen Beispiel. ### Dateien und Verzeichnisse In Tests müssen wir oft wissen, wo sich Dinge relativ zur aktuellen Testdatei befinden, und das ist nicht trivial, da der Test von mehreren Verzeichnissen aus aufgerufen werden kann oder sich in Unterverzeichnissen mit unterschiedlicher Tiefe befinden kann. Eine Hilfsklasse `transformers.test_utils.TestCasePlus` löst dieses Problem, indem sie alle grundlegenden Pfade sortiert und einfache Zugriffsmöglichkeiten auf sie bietet: - `pathlib`-Objekte (alle vollständig aufgelöst): - `test_file_path` - der aktuelle Testdateipfad, d.h. `__file__` - `test_file_dir` - das Verzeichnis, das die aktuelle Testdatei enthält - `tests_dir` - das Verzeichnis der `tests` Testreihe - `examples_dir` - das Verzeichnis der `examples` Test-Suite - `repo_root_dir` - das Verzeichnis des Repositorys - `src_dir` - das Verzeichnis von `src` (d.h. wo sich das Unterverzeichnis `transformers` befindet) - stringifizierte Pfade - wie oben, aber diese geben Pfade als Strings zurück, anstatt als `pathlib`-Objekte: - `test_file_path_str` - `test_file_dir_str` - `tests_dir_str` - `examples_dir_str` - `repo_root_dir_str` - `src_dir_str` Um diese zu verwenden, müssen Sie lediglich sicherstellen, dass der Test in einer Unterklasse von `transformers.test_utils.TestCasePlus` befindet. Zum Beispiel: ```python from transformers.testing_utils import TestCasePlus class PathExampleTest(TestCasePlus): def test_something_involving_local_locations(self): data_dir = self.tests_dir / "fixtures/tests_samples/wmt_en_ro" ``` Wenn Sie Pfade nicht über `pathlib` manipulieren müssen oder nur einen Pfad als String benötigen, können Sie jederzeit `str()` auf das `pathlib`-Objekt anwenden oder die Accessoren mit der Endung `_str` verwenden. Zum Beispiel: ```python from transformers.testing_utils import TestCasePlus class PathExampleTest(TestCasePlus): def test_something_involving_stringified_locations(self): examples_dir = self.examples_dir_str ``` ### Temporäre Dateien und Verzeichnisse Die Verwendung eindeutiger temporärer Dateien und Verzeichnisse ist für die parallele Durchführung von Tests unerlässlich, damit sich die Tests nicht gegenseitig überschreiben. Daten gegenseitig überschreiben. Außerdem möchten wir, dass die temporären Dateien und Verzeichnisse am Ende jedes Tests, der sie erstellt hat, gelöscht werden. erstellt hat. Daher ist die Verwendung von Paketen wie `tempfile`, die diese Anforderungen erfüllen, unerlässlich. 
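For comparison with the `TestCasePlus` helpers described next, this is what the plain standard-library approach looks like; it is only a sketch of the generic pattern, not the convention used in this repository.

```python
import os
import tempfile
import unittest


class PlainTempDirTest(unittest.TestCase):
    def test_write_and_read_back(self):
        # The context manager gives every test run its own unique directory
        # and removes it automatically on exit, so parallel runs cannot clash.
        with tempfile.TemporaryDirectory() as tmp_dir:
            path = os.path.join(tmp_dir, "output.txt")
            with open(path, "w") as f:
                f.write("hello")
            with open(path) as f:
                self.assertEqual(f.read(), "hello")
```

The drawback is precisely what the next paragraph addresses: the directory gets a new random path on every run and disappears at the end of the test, which is inconvenient while debugging.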
Beim Debuggen von Tests müssen Sie jedoch sehen können, was in der temporären Datei oder dem temporären Verzeichnis gespeichert wird und Sie möchten Sie müssen den genauen Pfad kennen und dürfen ihn nicht bei jedem neuen Testdurchlauf zufällig ändern. Für solche Zwecke ist die Hilfsklasse `transformers.test_utils.TestCasePlus` am besten geeignet. Sie ist eine Unterklasse von Unittest.TestCase`, so dass wir in den Testmodulen einfach von ihr erben können. Hier ist ein Beispiel für die Verwendung dieser Klasse: ```python from transformers.testing_utils import TestCasePlus class ExamplesTests(TestCasePlus): def test_whatever(self): tmp_dir = self.get_auto_remove_tmp_dir() ``` Dieser Code erstellt ein eindeutiges temporäres Verzeichnis und setzt `tmp_dir` auf dessen Speicherort. - Erstellen Sie ein eindeutiges temporäres Verzeichnis: ```python def test_whatever(self): tmp_dir = self.get_auto_remove_tmp_dir() ``` tmp_dir" enthält den Pfad zu dem erstellten temporären Verzeichnis. Es wird am Ende des Tests automatisch entfernt. Tests entfernt. - Erstellen Sie ein temporäres Verzeichnis meiner Wahl, stellen Sie sicher, dass es leer ist, bevor der Test beginnt, und leeren Sie es nach dem Test nicht. ```python def test_whatever(self): tmp_dir = self.get_auto_remove_tmp_dir("./xxx") ``` Dies ist nützlich für die Fehlersuche, wenn Sie ein bestimmtes Verzeichnis überwachen und sicherstellen möchten, dass die vorherigen Tests keine Daten darin hinterlassen haben. keine Daten dort hinterlassen haben. - Sie können das Standardverhalten außer Kraft setzen, indem Sie die Argumente `before` und `after` direkt überschreiben, was zu einem der folgenden Verhaltensweisen führt folgenden Verhaltensweisen: - `before=True`: das temporäre Verzeichnis wird immer zu Beginn des Tests gelöscht. - `before=False`: wenn das temporäre Verzeichnis bereits existiert, bleiben alle vorhandenen Dateien dort erhalten. - `after=True`: das temporäre Verzeichnis wird immer am Ende des Tests gelöscht. - `after=False`: das temporäre Verzeichnis wird am Ende des Tests immer beibehalten. <Tip> Um das Äquivalent von `rm -r` sicher ausführen zu können, sind nur Unterverzeichnisse des Projektarchivs checkout erlaubt, wenn ein explizites `tmp_dir` verwendet wird, so dass nicht versehentlich ein `/tmp` oder ein ähnlich wichtiger Teil des Dateisystems vernichtet wird. d.h. geben Sie bitte immer Pfade an, die mit `./` beginnen. </Tip> <Tip> Jeder Test kann mehrere temporäre Verzeichnisse registrieren, die alle automatisch entfernt werden, sofern nicht anders gewünscht. anders. </Tip> ### Temporäre Überschreibung von sys.path Wenn Sie `sys.path` vorübergehend überschreiben müssen, um z.B. von einem anderen Test zu importieren, können Sie den Kontextmanager `ExtendSysPath` verwenden. Beispiel: ```python import os from transformers.testing_utils import ExtendSysPath bindir = os.path.abspath(os.path.dirname(__file__)) with ExtendSysPath(f"{bindir}/.."): from test_trainer import TrainerIntegrationCommon # noqa ``` ### Überspringen von Tests Dies ist nützlich, wenn ein Fehler gefunden und ein neuer Test geschrieben wird, der Fehler aber noch nicht behoben ist. Damit wir ihn in das Haupt-Repository zu übertragen, müssen wir sicherstellen, dass er bei `make test` übersprungen wird. Methoden: - Ein **Skip** bedeutet, dass Sie erwarten, dass Ihr Test nur dann erfolgreich ist, wenn einige Bedingungen erfüllt sind, andernfalls sollte pytest den Test überspringen. die Ausführung des Tests ganz überspringen. 
Übliche Beispiele sind das Überspringen von Tests, die nur unter Windows laufen, auf Nicht-Windows-Plattformen oder das Überspringen von Tests, die von einer externen Ressource abhängen, die im Moment nicht verfügbar ist (z.B. eine Datenbank). - Ein **xfail** bedeutet, dass Sie erwarten, dass ein Test aus irgendeinem Grund fehlschlägt. Ein gängiges Beispiel ist ein Test für eine Funktion, die noch nicht noch nicht implementiert oder ein noch nicht behobener Fehler. Wenn ein Test trotz eines erwarteten Fehlschlags bestanden wird (markiert mit pytest.mark.xfail), ist dies ein xpass und wird in der Testzusammenfassung gemeldet. Einer der wichtigsten Unterschiede zwischen den beiden ist, dass `skip` den Test nicht ausführt, während `xfail` dies tut. Wenn also der Code, der fehlerhaft ist, einen schlechten Zustand verursacht, der sich auf andere Tests auswirkt, sollten Sie also nicht `xfail` verwenden. #### Implementierung - Hier sehen Sie, wie Sie einen ganzen Test bedingungslos überspringen können: ```python no-style @unittest.skip(reason="this bug needs to be fixed") def test_feature_x(): ``` oder mit pytest: ```python no-style @pytest.mark.skip(reason="this bug needs to be fixed") ``` oder mit dem `xfail` Weg: ```python no-style @pytest.mark.xfail def test_feature_x(): ``` - Hier erfahren Sie, wie Sie einen Test aufgrund einer internen Prüfung innerhalb des Tests auslassen können: ```python def test_feature_x(): if not has_something(): pytest.skip("unsupported configuration") ``` oder das ganze Modul: ```python import pytest if not pytest.config.getoption("--custom-flag"): pytest.skip("--custom-flag is missing, skipping tests", allow_module_level=True) ``` oder mit dem `xfail` Weg: ```python def test_feature_x(): pytest.xfail("expected to fail until bug XYZ is fixed") ``` - Hier erfahren Sie, wie Sie alle Tests in einem Modul überspringen können, wenn ein Import fehlt: ```python docutils = pytest.importorskip("docutils", minversion="0.3") ``` - Einen Test aufgrund einer Bedingung überspringen: ```python no-style @pytest.mark.skipif(sys.version_info < (3,6), reason="requires python3.6 or higher") def test_feature_x(): ``` oder: ```python no-style @unittest.skipIf(torch_device == "cpu", "Can't do half precision") def test_feature_x(): ``` oder überspringen Sie das ganze Modul: ```python no-style @pytest.mark.skipif(sys.platform == 'win32', reason="does not run on windows") class TestClass(): def test_feature_x(self): ``` Weitere Details, Beispiele und Möglichkeiten finden Sie [hier](https://docs.pytest.org/en/latest/skipping.html). ### Langsame Tests Die Bibliothek der Tests wächst ständig, und einige der Tests brauchen Minuten, um ausgeführt zu werden, daher können wir es uns nicht leisten, eine Stunde zu warten, bis die eine Stunde auf die Fertigstellung der Testsuite auf CI zu warten. Daher sollten langsame Tests, mit einigen Ausnahmen für wichtige Tests, wie im folgenden Beispiel wie im folgenden Beispiel markiert werden: ```python no-style from transformers.testing_utils import slow @slow def test_integration_foo(): ``` Sobald ein Test als `@slow` markiert ist, setzen Sie die Umgebungsvariable `RUN_SLOW=1`, um solche Tests auszuführen, z.B: ```bash RUN_SLOW=1 pytest tests ``` Einige Dekoratoren wie `@parameterized` schreiben Testnamen um, daher müssen `@slow` und die übrigen Skip-Dekoratoren `@require_*` müssen als letztes aufgeführt werden, damit sie korrekt funktionieren. Hier ist ein Beispiel für die korrekte Verwendung: ```python no-style @parameterized.expand(...) 
@slow def test_integration_foo(): ``` Wie zu Beginn dieses Dokuments erläutert, werden langsame Tests nach einem Zeitplan ausgeführt und nicht in PRs CI Prüfungen. Es ist also möglich, dass einige Probleme bei der Einreichung eines PRs übersehen werden und zusammengeführt werden. Solche Probleme werden werden beim nächsten geplanten CI-Job abgefangen. Das bedeutet aber auch, dass es wichtig ist, die langsamen Tests auf Ihrem Rechner auszuführen, bevor Sie den PR einreichen. Hier ist ein grober Entscheidungsmechanismus für die Auswahl der Tests, die als langsam markiert werden sollen: Wenn der Test auf eine der internen Komponenten der Bibliothek ausgerichtet ist (z.B. Modellierungsdateien, Tokenisierungsdateien, Pipelines), dann sollten wir diesen Test in der nicht langsamen Testsuite ausführen. Wenn er sich auf einen anderen Aspekt der Bibliothek bezieht, wie z.B. die Dokumentation oder die Beispiele, dann sollten wir diese Tests in der langsamen Testsuite durchführen. Und dann, zur Verfeinerung Ansatz zu verfeinern, sollten wir Ausnahmen einführen: - Alle Tests, die einen umfangreichen Satz von Gewichten oder einen Datensatz mit einer Größe von mehr als ~50MB herunterladen müssen (z.B. Modell- oder Tokenizer-Integrationstests, Pipeline-Integrationstests) sollten auf langsam gesetzt werden. Wenn Sie ein neues Modell hinzufügen, sollten Sie sollten Sie eine kleine Version des Modells (mit zufälligen Gewichtungen) für Integrationstests erstellen und in den Hub hochladen. Dies wird wird in den folgenden Abschnitten erläutert. - Alle Tests, die ein Training durchführen müssen, das nicht speziell auf Schnelligkeit optimiert ist, sollten auf langsam gesetzt werden. - Wir können Ausnahmen einführen, wenn einige dieser Tests, die nicht langsam sein sollten, unerträglich langsam sind, und sie auf `@slow`. Auto-Modellierungstests, die große Dateien auf der Festplatte speichern und laden, sind ein gutes Beispiel für Tests, die als als `@slow` markiert sind. - Wenn ein Test in weniger als 1 Sekunde auf CI abgeschlossen wird (einschließlich eventueller Downloads), sollte es sich trotzdem um einen normalen Test handeln. Insgesamt müssen alle nicht langsamen Tests die verschiedenen Interna abdecken und dabei schnell bleiben. Zum Beispiel, kann eine signifikante Abdeckung erreicht werden, indem Sie mit speziell erstellten kleinen Modellen mit zufälligen Gewichten testen. Solche Modelle haben eine sehr geringe Anzahl von Schichten (z.B. 2), Vokabeln (z.B. 1000), usw. Dann können die `@slow`-Tests große langsame Modelle verwenden, um qualitative Tests durchzuführen. Um die Verwendung dieser Modelle zu sehen, suchen Sie einfach nach *winzigen* Modellen mit: ```bash grep tiny tests examples ``` Hier ist ein Beispiel für ein [Skript](https://github.com/huggingface/transformers/tree/main/scripts/fsmt/fsmt-make-tiny-model.py), das das winzige Modell erstellt hat [stas/tiny-wmt19-en-de](https://huggingface.co/stas/tiny-wmt19-en-de). Sie können es ganz einfach an Ihre eigene Architektur Ihres Modells anpassen. Es ist leicht, die Laufzeit falsch zu messen, wenn zum Beispiel ein großes Modell heruntergeladen wird, aber wenn Sie es lokal testen, würden die heruntergeladenen Dateien zwischengespeichert und somit die Download-Zeit nicht gemessen werden. Prüfen Sie daher den Ausführungsgeschwindigkeitsbericht in den CI-Protokollen (die Ausgabe von `pytest --durations=0 tests`). 
Dieser Bericht ist auch nützlich, um langsame Ausreißer zu finden, die nicht als solche gekennzeichnet sind oder die neu geschrieben werden müssen, um schnell zu sein. Wenn Sie bemerken, dass die Testsuite beim CI langsam wird, zeigt die oberste Liste dieses Berichts die langsamsten Tests. ### Testen der stdout/stderr-Ausgabe Um Funktionen zu testen, die in `stdout` und/oder `stderr` schreiben, kann der Test auf diese Ströme zugreifen, indem er die [capsys system](https://docs.pytest.org/en/latest/capture.html) von `pytest` zugreifen. So wird dies bewerkstelligt: ```python import sys def print_to_stdout(s): print(s) def print_to_stderr(s): sys.stderr.write(s) def test_result_and_stdout(capsys): msg = "Hello" print_to_stdout(msg) print_to_stderr(msg) out, err = capsys.readouterr() # consume the captured output streams # optional: if you want to replay the consumed streams: sys.stdout.write(out) sys.stderr.write(err) # test: assert msg in out assert msg in err ``` Und natürlich wird `stderr` in den meisten Fällen als Teil einer Ausnahme auftreten, so dass try/except in einem solchen Fall verwendet werden muss Fall verwendet werden: ```python def raise_exception(msg): raise ValueError(msg) def test_something_exception(): msg = "Not a good value" error = "" try: raise_exception(msg) except Exception as e: error = str(e) assert msg in error, f"{msg} is in the exception:\n{error}" ``` Ein anderer Ansatz zur Erfassung von stdout ist `contextlib.redirect_stdout`: ```python from io import StringIO from contextlib import redirect_stdout def print_to_stdout(s): print(s) def test_result_and_stdout(): msg = "Hello" buffer = StringIO() with redirect_stdout(buffer): print_to_stdout(msg) out = buffer.getvalue() # optional: if you want to replay the consumed streams: sys.stdout.write(out) # test: assert msg in out ``` Ein wichtiges potenzielles Problem beim Erfassen von stdout ist, dass es `r` Zeichen enthalten kann, die bei normalem `print` alles zurücksetzen, was bisher gedruckt wurde. Mit `pytest` gibt es kein Problem, aber mit `pytest -s` werden diese werden diese Zeichen in den Puffer aufgenommen. Um den Test mit und ohne `-s` laufen zu lassen, müssen Sie also eine zusätzliche Bereinigung zusätzliche Bereinigung der erfassten Ausgabe vornehmen, indem Sie `re.sub(r'~.*\r', '', buf, 0, re.M)` verwenden. Aber dann haben wir einen Hilfskontextmanager-Wrapper, der sich automatisch um alles kümmert, unabhängig davon, ob er einige "*.*.*.*" enthält oder nicht: ```python from transformers.testing_utils import CaptureStdout with CaptureStdout() as cs: function_that_writes_to_stdout() print(cs.out) ``` Hier ist ein vollständiges Testbeispiel: ```python from transformers.testing_utils import CaptureStdout msg = "Secret message\r" final = "Hello World" with CaptureStdout() as cs: print(msg + final) assert cs.out == final + "\n", f"captured: {cs.out}, expecting {final}" ``` Wenn Sie `stderr` aufzeichnen möchten, verwenden Sie stattdessen die Klasse `CaptureStderr`: ```python from transformers.testing_utils import CaptureStderr with CaptureStderr() as cs: function_that_writes_to_stderr() print(cs.err) ``` Wenn Sie beide Streams auf einmal erfassen müssen, verwenden Sie die übergeordnete Klasse `CaptureStd`: ```python from transformers.testing_utils import CaptureStd with CaptureStd() as cs: function_that_writes_to_stdout_and_stderr() print(cs.err, cs.out) ``` Um das Debuggen von Testproblemen zu erleichtern, geben diese Kontextmanager standardmäßig die aufgezeichneten Streams beim Verlassen aus dem Kontext wieder. 
### Erfassen von Logger-Streams Wenn Sie die Ausgabe eines Loggers validieren müssen, können Sie `CaptureLogger` verwenden: ```python from transformers import logging from transformers.testing_utils import CaptureLogger msg = "Testing 1, 2, 3" logging.set_verbosity_info() logger = logging.get_logger("transformers.models.bart.tokenization_bart") with CaptureLogger(logger) as cl: logger.info(msg) assert cl.out, msg + "\n" ``` ### Testen mit Umgebungsvariablen Wenn Sie die Auswirkungen von Umgebungsvariablen für einen bestimmten Test testen möchten, können Sie einen Hilfsdekorator verwenden `transformers.testing_utils.mockenv` ```python from transformers.testing_utils import mockenv class HfArgumentParserTest(unittest.TestCase): @mockenv(TRANSFORMERS_VERBOSITY="error") def test_env_override(self): env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None) ``` Manchmal muss ein externes Programm aufgerufen werden, was die Einstellung von `PYTHONPATH` in `os.environ` erfordert, um mehrere lokale Pfade einzuschließen. mehrere lokale Pfade. Eine Hilfsklasse `transformers.test_utils.TestCasePlus` hilft Ihnen dabei: ```python from transformers.testing_utils import TestCasePlus class EnvExampleTest(TestCasePlus): def test_external_prog(self): env = self.get_env() # now call the external program, passing `env` to it ``` Je nachdem, ob die Testdatei in der Testsuite `tests` oder in `examples` war, wird sie korrekt eingerichtet `env[PYTHONPATH]` eines dieser beiden Verzeichnisse und auch das `src` Verzeichnis, um sicherzustellen, dass der Test gegen das aktuelle um sicherzustellen, dass der Test mit dem aktuellen Projektarchiv durchgeführt wird, und schließlich mit dem, was in `env[PYTHONPATH]` bereits eingestellt war, bevor der Test aufgerufen wurde. wenn überhaupt. Diese Hilfsmethode erstellt eine Kopie des Objekts `os.environ`, so dass das Original intakt bleibt. ### Reproduzierbare Ergebnisse erhalten In manchen Situationen möchten Sie vielleicht die Zufälligkeit Ihrer Tests beseitigen. Um identische, reproduzierbare Ergebnisse zu erhalten, müssen Sie müssen Sie den Seed festlegen: ```python seed = 42 # python RNG import random random.seed(seed) # pytorch RNGs import torch torch.manual_seed(seed) torch.backends.cudnn.deterministic = True if torch.cuda.is_available(): torch.cuda.manual_seed_all(seed) # numpy RNG import numpy as np np.random.seed(seed) # tf RNG tf.random.set_seed(seed) ``` ### Tests debuggen Um einen Debugger an der Stelle zu starten, an der die Warnung auftritt, gehen Sie wie folgt vor: ```bash pytest tests/utils/test_logging.py -W error::UserWarning --pdb ``` ## Arbeiten mit Github-Aktionen-Workflows Um einen CI-Job für einen Self-Push-Workflow auszulösen, müssen Sie: 1. Erstellen Sie einen neuen Zweig auf `transformers` Ursprung (keine Gabelung!). 2. Der Name der Verzweigung muss entweder mit `ci_` oder `ci-` beginnen (`main` löst ihn auch aus, aber wir können keine PRs auf `main`). Es wird auch nur für bestimmte Pfade ausgelöst - Sie können die aktuelle Definition finden, falls sie falls sie sich seit der Erstellung dieses Dokuments geändert hat [hier](https://github.com/huggingface/transformers/blob/main/.github/workflows/self-push.yml) unter *push:* 3. Erstellen Sie einen PR von diesem Zweig. 4. Dann können Sie sehen, wie der Job erscheint [hier](https://github.com/huggingface/transformers/actions/workflows/self-push.yml). Er wird möglicherweise nicht sofort ausgeführt, wenn es ein Backlog vorhanden ist. 
## Testen experimenteller CI-Funktionen Das Testen von CI-Funktionen kann potenziell problematisch sein, da es die normale CI-Funktion beeinträchtigen kann. Wenn also eine neue CI-Funktion hinzugefügt werden soll, sollte dies wie folgt geschehen. 1. Erstellen Sie einen neuen Auftrag, der die zu testende Funktion testet. 2. Der neue Job muss immer erfolgreich sein, so dass er uns ein grünes ✓ gibt (Details unten). 3. Lassen Sie ihn einige Tage lang laufen, um zu sehen, dass eine Vielzahl verschiedener PR-Typen darauf laufen (Benutzer-Gabelzweige, nicht geforkte Zweige, Zweige, die von github.com UI direct file edit stammen, verschiedene erzwungene Pushes, etc. - es gibt es gibt so viele), während Sie die Protokolle des experimentellen Jobs überwachen (nicht den gesamten Job grün, da er absichtlich immer grün) 4. Wenn klar ist, dass alles in Ordnung ist, fügen Sie die neuen Änderungen in die bestehenden Jobs ein. Auf diese Weise wird der normale Arbeitsablauf nicht durch Experimente mit der CI-Funktionalität selbst beeinträchtigt. Wie können wir nun dafür sorgen, dass der Auftrag immer erfolgreich ist, während die neue CI-Funktion entwickelt wird? Einige CIs, wie TravisCI, unterstützen ignore-step-failure und melden den gesamten Job als erfolgreich, aber CircleCI und Github Actions unterstützen dies zum jetzigen Zeitpunkt nicht. Sie können also die folgende Abhilfe verwenden: 1. Setzen Sie `set +euo pipefail` am Anfang des Ausführungsbefehls, um die meisten potenziellen Fehler im Bash-Skript zu unterdrücken. 2. Der letzte Befehl muss ein Erfolg sein: `echo "done"` oder einfach `true` reicht aus. Hier ist ein Beispiel: ```yaml - run: name: run CI experiment command: | set +euo pipefail echo "setting run-all-despite-any-errors-mode" this_command_will_fail echo "but bash continues to run" # emulate another failure false # but the last command must be a success echo "during experiment do not remove: reporting success to CI, even if there were failures" ``` Für einfache Befehle können Sie auch Folgendes tun: ```bash cmd_that_may_fail || true ``` Wenn Sie mit den Ergebnissen zufrieden sind, integrieren Sie den experimentellen Schritt oder Job natürlich in den Rest der normalen Jobs, Entfernen Sie dabei `set +euo pipefail` oder andere Dinge, die Sie eventuell hinzugefügt haben, um sicherzustellen, dass der experimentelle Auftrag nicht den normalen CI-Betrieb nicht beeinträchtigt. Dieser ganze Prozess wäre viel einfacher gewesen, wenn wir nur etwas wie `allow-failure` für den experimentellen Schritt festlegen könnten und ihn scheitern lassen würden, ohne den Gesamtstatus der PRs zu beeinträchtigen. Aber wie bereits erwähnt, haben CircleCI und Github Actions dies im Moment nicht unterstützen. Sie können in diesen CI-spezifischen Threads für diese Funktion stimmen und sehen, wo sie steht: - [Github Actions:](https://github.com/actions/toolkit/issues/399) - [CircleCI:](https://ideas.circleci.com/ideas/CCI-I-344)
transformers/docs/source/de/testing.md/0
{ "file_path": "transformers/docs/source/de/testing.md", "repo_id": "transformers", "token_count": 19298 }
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Utilities for `FeatureExtractors` This page lists all the utility functions that can be used by the audio [`FeatureExtractor`] in order to compute special features from a raw audio using common algorithms such as *Short Time Fourier Transform* or *log mel spectrogram*. Most of those are only useful if you are studying the code of the audio processors in the library. ## Audio Transformations [[autodoc]] audio_utils.hertz_to_mel [[autodoc]] audio_utils.mel_to_hertz [[autodoc]] audio_utils.mel_filter_bank [[autodoc]] audio_utils.optimal_fft_length [[autodoc]] audio_utils.window_function [[autodoc]] audio_utils.spectrogram [[autodoc]] audio_utils.power_to_db [[autodoc]] audio_utils.amplitude_to_db
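As a quick illustration of how these utilities fit together, the sketch below builds a mel filter bank and turns a synthetic waveform into a log-mel spectrogram. It is only a sketch: the frame length, hop length and number of mel bins are arbitrary choices for the example, and the parameter names should be checked against the API reference above if the signatures have changed.

```python
import numpy as np

from transformers.audio_utils import mel_filter_bank, spectrogram, window_function

sampling_rate = 16000
# one second of a 440 Hz sine wave as a stand-in for real audio
waveform = np.sin(2 * np.pi * 440.0 * np.arange(sampling_rate) / sampling_rate).astype(np.float32)

# triangular mel filters mapping 257 FFT bins (fft_length // 2 + 1) to 80 mel bins
mel_filters = mel_filter_bank(
    num_frequency_bins=257,
    num_mel_filters=80,
    min_frequency=0.0,
    max_frequency=8000.0,
    sampling_rate=sampling_rate,
    norm="slaney",
    mel_scale="slaney",
)

# log-mel spectrogram with 25 ms windows and a 10 ms hop at 16 kHz
log_mel = spectrogram(
    waveform,
    window_function(400, "hann"),
    frame_length=400,
    hop_length=160,
    fft_length=512,
    power=2.0,
    mel_filters=mel_filters,
    log_mel="log10",
)
print(log_mel.shape)  # roughly (80, number_of_frames)
```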
transformers/docs/source/en/internal/audio_utils.md/0
{ "file_path": "transformers/docs/source/en/internal/audio_utils.md", "repo_id": "transformers", "token_count": 405 }
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Tokenizer A tokenizer is in charge of preparing the inputs for a model. The library contains tokenizers for all the models. Most of the tokenizers are available in two flavors: a full python implementation and a "Fast" implementation based on the Rust library [🤗 Tokenizers](https://github.com/huggingface/tokenizers). The "Fast" implementations allows: 1. a significant speed-up in particular when doing batched tokenization and 2. additional methods to map between the original string (character and words) and the token space (e.g. getting the index of the token comprising a given character or the span of characters corresponding to a given token). The base classes [`PreTrainedTokenizer`] and [`PreTrainedTokenizerFast`] implement the common methods for encoding string inputs in model inputs (see below) and instantiating/saving python and "Fast" tokenizers either from a local file or directory or from a pretrained tokenizer provided by the library (downloaded from HuggingFace's AWS S3 repository). They both rely on [`~tokenization_utils_base.PreTrainedTokenizerBase`] that contains the common methods, and [`~tokenization_utils_base.SpecialTokensMixin`]. [`PreTrainedTokenizer`] and [`PreTrainedTokenizerFast`] thus implement the main methods for using all the tokenizers: - Tokenizing (splitting strings in sub-word token strings), converting tokens strings to ids and back, and encoding/decoding (i.e., tokenizing and converting to integers). - Adding new tokens to the vocabulary in a way that is independent of the underlying structure (BPE, SentencePiece...). - Managing special tokens (like mask, beginning-of-sentence, etc.): adding them, assigning them to attributes in the tokenizer for easy access and making sure they are not split during tokenization. [`BatchEncoding`] holds the output of the [`~tokenization_utils_base.PreTrainedTokenizerBase`]'s encoding methods (`__call__`, `encode_plus` and `batch_encode_plus`) and is derived from a Python dictionary. When the tokenizer is a pure python tokenizer, this class behaves just like a standard python dictionary and holds the various model inputs computed by these methods (`input_ids`, `attention_mask`...). When the tokenizer is a "Fast" tokenizer (i.e., backed by HuggingFace [tokenizers library](https://github.com/huggingface/tokenizers)), this class provides in addition several advanced alignment methods which can be used to map between the original string (character and words) and the token space (e.g., getting the index of the token comprising a given character or the span of characters corresponding to a given token). 
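For example, with a "Fast" tokenizer the returned [`BatchEncoding`] behaves like a dictionary and also exposes the alignment helpers mentioned above. A minimal sketch (the checkpoint choice and the printed values are only illustrative):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")  # loads a "Fast" tokenizer when available

encoding = tokenizer("Hello world!")
print(encoding["input_ids"])          # token ids, e.g. [101, 7592, 2088, 999, 102]
print(encoding.tokens())              # token strings, e.g. ['[CLS]', 'hello', 'world', '!', '[SEP]']
print(encoding.word_ids())            # word index for each token, e.g. [None, 0, 1, 2, None]
print(encoding.char_to_token(6))      # index of the token covering the character "w"
print(tokenizer.decode(encoding["input_ids"]))  # back to a string, including special tokens
```

The alignment helpers (`tokens`, `word_ids`, `char_to_token`, ...) are only available on "Fast" tokenizers; a pure python tokenizer returns a plain dictionary-like [`BatchEncoding`] without them.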
## Multimodal Tokenizer

Apart from that, each tokenizer can be a "multimodal" tokenizer, which means that the tokenizer will hold all relevant special tokens as part of tokenizer attributes for easier access. For example, if the tokenizer is loaded from a vision-language model like LLaVA, you will be able to access `tokenizer.image_token_id` to obtain the special image token used as a placeholder.

To enable extra special tokens for any type of tokenizer, you have to add the following lines and save the tokenizer. Extra special tokens do not have to be modality-related and can be anything that the model often needs access to. In the code below, the tokenizer saved at `output_dir` will have direct access to three more special tokens.

```python
from transformers import AutoTokenizer

vision_tokenizer = AutoTokenizer.from_pretrained(
    "llava-hf/llava-1.5-7b-hf",
    extra_special_tokens={"image_token": "<image>", "boi_token": "<image_start>", "eoi_token": "<image_end>"}
)
print(vision_tokenizer.image_token, vision_tokenizer.image_token_id)
# ("<image>", 32000)
```

## PreTrainedTokenizer

[[autodoc]] PreTrainedTokenizer
    - __call__
    - add_tokens
    - add_special_tokens
    - apply_chat_template
    - batch_decode
    - decode
    - encode
    - push_to_hub
    - all

## PreTrainedTokenizerFast

The [`PreTrainedTokenizerFast`] depends on the [tokenizers](https://huggingface.co/docs/tokenizers) library. The tokenizers obtained from the 🤗 tokenizers library can be loaded very simply into 🤗 transformers. Take a look at the [Using tokenizers from 🤗 tokenizers](../fast_tokenizers) page to understand how this is done.

[[autodoc]] PreTrainedTokenizerFast
    - __call__
    - add_tokens
    - add_special_tokens
    - apply_chat_template
    - batch_decode
    - decode
    - encode
    - push_to_hub
    - all

## BatchEncoding

[[autodoc]] BatchEncoding
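Among the methods listed above, `apply_chat_template` deserves a short example, since it renders a list of chat messages with the tokenizer's chat template before (or instead of) tokenizing. This is only a sketch: any checkpoint that ships a chat template works, and the one used here is an assumption.

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "What does a tokenizer do?"},
]

# render the conversation as a single prompt string without tokenizing it
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)

# or tokenize directly and get input ids that are ready to pass to `generate`
input_ids = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt")
```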
transformers/docs/source/en/main_classes/tokenizer.md/0
{ "file_path": "transformers/docs/source/en/main_classes/tokenizer.md", "repo_id": "transformers", "token_count": 1447 }
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # BertJapanese ## Overview The BERT models trained on Japanese text. There are models with two different tokenization methods: - Tokenize with MeCab and WordPiece. This requires some extra dependencies, [fugashi](https://github.com/polm/fugashi) which is a wrapper around [MeCab](https://taku910.github.io/mecab/). - Tokenize into characters. To use *MecabTokenizer*, you should `pip install transformers["ja"]` (or `pip install -e .["ja"]` if you install from source) to install dependencies. See [details on cl-tohoku repository](https://github.com/cl-tohoku/bert-japanese). Example of using a model with MeCab and WordPiece tokenization: ```python >>> import torch >>> from transformers import AutoModel, AutoTokenizer >>> bertjapanese = AutoModel.from_pretrained("cl-tohoku/bert-base-japanese") >>> tokenizer = AutoTokenizer.from_pretrained("cl-tohoku/bert-base-japanese") >>> ## Input Japanese Text >>> line = "吾輩は猫である。" >>> inputs = tokenizer(line, return_tensors="pt") >>> print(tokenizer.decode(inputs["input_ids"][0])) [CLS] 吾輩 は 猫 で ある 。 [SEP] >>> outputs = bertjapanese(**inputs) ``` Example of using a model with Character tokenization: ```python >>> bertjapanese = AutoModel.from_pretrained("cl-tohoku/bert-base-japanese-char") >>> tokenizer = AutoTokenizer.from_pretrained("cl-tohoku/bert-base-japanese-char") >>> ## Input Japanese Text >>> line = "吾輩は猫である。" >>> inputs = tokenizer(line, return_tensors="pt") >>> print(tokenizer.decode(inputs["input_ids"][0])) [CLS] 吾 輩 は 猫 で あ る 。 [SEP] >>> outputs = bertjapanese(**inputs) ``` This model was contributed by [cl-tohoku](https://huggingface.co/cl-tohoku). <Tip> This implementation is the same as BERT, except for tokenization method. Refer to [BERT documentation](bert) for API reference information. </Tip> ## BertJapaneseTokenizer [[autodoc]] BertJapaneseTokenizer
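The same checkpoints can also be exercised through the `fill-mask` pipeline, which takes care of tokenization and decoding. This is a minimal sketch and assumes the MeCab dependencies mentioned above are installed:

```python
from transformers import pipeline

fill_mask = pipeline("fill-mask", model="cl-tohoku/bert-base-japanese")

# predict the masked token; the top candidate should be a plausible Japanese noun
predictions = fill_mask("吾輩は[MASK]である。")
print(predictions[0]["token_str"], predictions[0]["score"])
```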
transformers/docs/source/en/model_doc/bert-japanese.md/0
{ "file_path": "transformers/docs/source/en/model_doc/bert-japanese.md", "repo_id": "transformers", "token_count": 857 }
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # CamemBERT ## Overview The CamemBERT model was proposed in [CamemBERT: a Tasty French Language Model](https://arxiv.org/abs/1911.03894) by [Louis Martin](https://huggingface.co/louismartin), [Benjamin Muller](https://huggingface.co/benjamin-mlr), [Pedro Javier Ortiz Suárez](https://huggingface.co/pjox), Yoann Dupont, Laurent Romary, Éric Villemonte de la Clergerie, [Djamé Seddah](https://huggingface.co/Djame), and [Benoît Sagot](https://huggingface.co/sagot). It is based on Facebook's RoBERTa model released in 2019. It is a model trained on 138GB of French text. The abstract from the paper is the following: *Pretrained language models are now ubiquitous in Natural Language Processing. Despite their success, most available models have either been trained on English data or on the concatenation of data in multiple languages. This makes practical use of such models --in all languages except English-- very limited. Aiming to address this issue for French, we release CamemBERT, a French version of the Bi-directional Encoders for Transformers (BERT). We measure the performance of CamemBERT compared to multilingual models in multiple downstream tasks, namely part-of-speech tagging, dependency parsing, named-entity recognition, and natural language inference. CamemBERT improves the state of the art for most of the tasks considered. We release the pretrained model for CamemBERT hoping to foster research and downstream applications for French NLP.* This model was contributed by [the ALMAnaCH team (Inria)](https://huggingface.co/almanach). The original code can be found [here](https://camembert-model.fr/). <Tip> This implementation is the same as RoBERTa. Refer to the [documentation of RoBERTa](roberta) for usage examples as well as the information relative to the inputs and outputs. 
</Tip> ## Resources - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) - [Question answering task guide](../tasks/question_answering) - [Causal language modeling task guide](../tasks/language_modeling) - [Masked language modeling task guide](../tasks/masked_language_modeling) - [Multiple choice task guide](../tasks/multiple_choice) ## CamembertConfig [[autodoc]] CamembertConfig ## CamembertTokenizer [[autodoc]] CamembertTokenizer - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences - save_vocabulary ## CamembertTokenizerFast [[autodoc]] CamembertTokenizerFast <frameworkcontent> <pt> ## CamembertModel [[autodoc]] CamembertModel ## CamembertForCausalLM [[autodoc]] CamembertForCausalLM ## CamembertForMaskedLM [[autodoc]] CamembertForMaskedLM ## CamembertForSequenceClassification [[autodoc]] CamembertForSequenceClassification ## CamembertForMultipleChoice [[autodoc]] CamembertForMultipleChoice ## CamembertForTokenClassification [[autodoc]] CamembertForTokenClassification ## CamembertForQuestionAnswering [[autodoc]] CamembertForQuestionAnswering </pt> <tf> ## TFCamembertModel [[autodoc]] TFCamembertModel ## TFCamembertForCausalLM [[autodoc]] TFCamembertForCausalLM ## TFCamembertForMaskedLM [[autodoc]] TFCamembertForMaskedLM ## TFCamembertForSequenceClassification [[autodoc]] TFCamembertForSequenceClassification ## TFCamembertForMultipleChoice [[autodoc]] TFCamembertForMultipleChoice ## TFCamembertForTokenClassification [[autodoc]] TFCamembertForTokenClassification ## TFCamembertForQuestionAnswering [[autodoc]] TFCamembertForQuestionAnswering </tf> </frameworkcontent>
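Since the checkpoints are RoBERTa-style masked language models, a quick way to try CamemBERT is the `fill-mask` pipeline. A minimal sketch (the `camembert-base` checkpoint name is assumed to still resolve on the Hub):

```python
from transformers import pipeline

camembert_fill_mask = pipeline("fill-mask", model="camembert-base")

# CamemBERT uses "<mask>" as its mask token
results = camembert_fill_mask("Le camembert est <mask> :)")
print(results[0]["token_str"], results[0]["score"])
```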
transformers/docs/source/en/model_doc/camembert.md/0
{ "file_path": "transformers/docs/source/en/model_doc/camembert.md", "repo_id": "transformers", "token_count": 1309 }
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # FSMT ## Overview FSMT (FairSeq MachineTranslation) models were introduced in [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616) by Nathan Ng, Kyra Yee, Alexei Baevski, Myle Ott, Michael Auli, Sergey Edunov. The abstract of the paper is the following: *This paper describes Facebook FAIR's submission to the WMT19 shared news translation task. We participate in two language pairs and four language directions, English <-> German and English <-> Russian. Following our submission from last year, our baseline systems are large BPE-based transformer models trained with the Fairseq sequence modeling toolkit which rely on sampled back-translations. This year we experiment with different bitext data filtering schemes, as well as with adding filtered back-translated data. We also ensemble and fine-tune our models on domain-specific data, then decode using noisy channel model reranking. Our submissions are ranked first in all four directions of the human evaluation campaign. On En->De, our system significantly outperforms other systems as well as human translations. This system improves upon our WMT'18 submission by 4.5 BLEU points.* This model was contributed by [stas](https://huggingface.co/stas). The original code can be found [here](https://github.com/pytorch/fairseq/tree/master/examples/wmt19). ## Implementation Notes - FSMT uses source and target vocabulary pairs that aren't combined into one. It doesn't share embeddings tokens either. Its tokenizer is very similar to [`XLMTokenizer`] and the main model is derived from [`BartModel`]. ## FSMTConfig [[autodoc]] FSMTConfig ## FSMTTokenizer [[autodoc]] FSMTTokenizer - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences - save_vocabulary ## FSMTModel [[autodoc]] FSMTModel - forward ## FSMTForConditionalGeneration [[autodoc]] FSMTForConditionalGeneration - forward
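A short translation example makes the encoder-decoder usage concrete. The sketch below follows the usual `generate` workflow with one of the released WMT19 checkpoints; the exact output text may differ slightly between model versions.

```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer

mname = "facebook/wmt19-en-de"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)

input_text = "Machine learning is great, isn't it?"
input_ids = tokenizer.encode(input_text, return_tensors="pt")

# beam search decoding, then strip the special tokens from the output
outputs = model.generate(input_ids, num_beams=5, early_stopping=True)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
# e.g. "Maschinelles Lernen ist großartig, oder?"
```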
transformers/docs/source/en/model_doc/fsmt.md/0
{ "file_path": "transformers/docs/source/en/model_doc/fsmt.md", "repo_id": "transformers", "token_count": 739 }
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # GPTSAN-japanese <Tip warning={true}> This model is in maintenance mode only, we don't accept any new PRs changing its code. If you run into any issues running this model, please reinstall the last version that supported this model: v4.40.2. You can do so by running the following command: `pip install -U transformers==4.40.2`. </Tip> ## Overview The GPTSAN-japanese model was released in the repository by Toshiyuki Sakamoto (tanreinama). GPTSAN is a Japanese language model using Switch Transformer. It has the same structure as the model introduced as Prefix LM in the T5 paper, and support both Text Generation and Masked Language Modeling tasks. These basic tasks similarly can fine-tune for translation or summarization. ### Usage example The `generate()` method can be used to generate text using GPTSAN-Japanese model. ```python >>> from transformers import AutoModel, AutoTokenizer >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("Tanrei/GPTSAN-japanese") >>> model = AutoModel.from_pretrained("Tanrei/GPTSAN-japanese").cuda() >>> x_tok = tokenizer("は、", prefix_text="織田信長", return_tensors="pt") >>> torch.manual_seed(0) >>> gen_tok = model.generate(x_tok.input_ids.cuda(), token_type_ids=x_tok.token_type_ids.cuda(), max_new_tokens=20) >>> tokenizer.decode(gen_tok[0]) '織田信長は、2004年に『戦国BASARA』のために、豊臣秀吉' ``` ## GPTSAN Features GPTSAN has some unique features. It has a model structure of Prefix-LM. It works as a shifted Masked Language Model for Prefix Input tokens. Un-prefixed inputs behave like normal generative models. The Spout vector is a GPTSAN specific input. Spout is pre-trained with random inputs, but you can specify a class of text or an arbitrary vector during fine-tuning. This allows you to indicate the tendency of the generated text. GPTSAN has a sparse Feed Forward based on Switch-Transformer. You can also add other layers and train them partially. See the original GPTSAN repository for details. ### Prefix-LM Model GPTSAN has the structure of the model named Prefix-LM in the `T5` paper. (The original GPTSAN repository calls it `hybrid`) In GPTSAN, the `Prefix` part of Prefix-LM, that is, the input position that can be referenced by both tokens, can be specified with any length. Arbitrary lengths can also be specified differently for each batch. This length applies to the text entered in `prefix_text` for the tokenizer. The tokenizer returns the mask of the `Prefix` part of Prefix-LM as `token_type_ids`. The model treats the part where `token_type_ids` is 1 as a `Prefix` part, that is, the input can refer to both tokens before and after. ## Usage tips Specifying the Prefix part is done with a mask passed to self-attention. 
When token_type_ids=None or all zero, it is equivalent to regular causal mask for example: >>> x_token = tokenizer("アイウエ") input_ids: | SOT | SEG | ア | イ | ウ | エ | token_type_ids: | 1 | 0 | 0 | 0 | 0 | 0 | prefix_lm_mask: SOT | 1 0 0 0 0 0 | SEG | 1 1 0 0 0 0 | ア | 1 1 1 0 0 0 | イ | 1 1 1 1 0 0 | ウ | 1 1 1 1 1 0 | エ | 1 1 1 1 1 1 | >>> x_token = tokenizer("", prefix_text="アイウエ") input_ids: | SOT | ア | イ | ウ | エ | SEG | token_type_ids: | 1 | 1 | 1 | 1 | 1 | 0 | prefix_lm_mask: SOT | 1 1 1 1 1 0 | ア | 1 1 1 1 1 0 | イ | 1 1 1 1 1 0 | ウ | 1 1 1 1 1 0 | エ | 1 1 1 1 1 0 | SEG | 1 1 1 1 1 1 | >>> x_token = tokenizer("ウエ", prefix_text="アイ") input_ids: | SOT | ア | イ | SEG | ウ | エ | token_type_ids: | 1 | 1 | 1 | 0 | 0 | 0 | prefix_lm_mask: SOT | 1 1 1 0 0 0 | ア | 1 1 1 0 0 0 | イ | 1 1 1 0 0 0 | SEG | 1 1 1 1 0 0 | ウ | 1 1 1 1 1 0 | エ | 1 1 1 1 1 1 | ### Spout Vector A Spout Vector is a special vector for controlling text generation. This vector is treated as the first embedding in self-attention to bring extraneous attention to the generated tokens. In the pre-trained model published from `Tanrei/GPTSAN-japanese`, the Spout Vector is a 128-dimensional vector that passes through 8 fully connected layers in the model and is projected into the space acting as external attention. The Spout Vector projected by the fully connected layer is split to be passed to all self-attentions. ## GPTSanJapaneseConfig [[autodoc]] GPTSanJapaneseConfig ## GPTSanJapaneseTokenizer [[autodoc]] GPTSanJapaneseTokenizer ## GPTSanJapaneseModel [[autodoc]] GPTSanJapaneseModel ## GPTSanJapaneseForConditionalGeneration [[autodoc]] GPTSanJapaneseForConditionalGeneration - forward
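The sketch below shows one way to pass a Spout Vector at generation time. It is hedged: it assumes the keyword argument is named `spout` and expects a `(batch_size, 128)` float tensor, as in the original repository, so check the model's `forward` signature before relying on it.

```python
import torch
from transformers import AutoModel, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
model = AutoModel.from_pretrained("Tanrei/GPTSAN-japanese")

x_tok = tokenizer("は、", prefix_text="織田信長", return_tensors="pt")

# a random 128-dimensional control vector; during fine-tuning this could encode a text class
spout = torch.rand((1, 128))

gen_tok = model.generate(
    x_tok.input_ids,
    token_type_ids=x_tok.token_type_ids,
    spout=spout,
    max_new_tokens=20,
)
print(tokenizer.decode(gen_tok[0]))
```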
transformers/docs/source/en/model_doc/gptsan-japanese.md/0
{ "file_path": "transformers/docs/source/en/model_doc/gptsan-japanese.md", "repo_id": "transformers", "token_count": 1750 }
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. specific language governing permissions and limitations under the License. --> # ImageGPT ## Overview The ImageGPT model was proposed in [Generative Pretraining from Pixels](https://openai.com/blog/image-gpt) by Mark Chen, Alec Radford, Rewon Child, Jeffrey Wu, Heewoo Jun, David Luan, Ilya Sutskever. ImageGPT (iGPT) is a GPT-2-like model trained to predict the next pixel value, allowing for both unconditional and conditional image generation. The abstract from the paper is the following: *Inspired by progress in unsupervised representation learning for natural language, we examine whether similar models can learn useful representations for images. We train a sequence Transformer to auto-regressively predict pixels, without incorporating knowledge of the 2D input structure. Despite training on low-resolution ImageNet without labels, we find that a GPT-2 scale model learns strong image representations as measured by linear probing, fine-tuning, and low-data classification. On CIFAR-10, we achieve 96.3% accuracy with a linear probe, outperforming a supervised Wide ResNet, and 99.0% accuracy with full fine-tuning, matching the top supervised pre-trained models. We are also competitive with self-supervised benchmarks on ImageNet when substituting pixels for a VQVAE encoding, achieving 69.0% top-1 accuracy on a linear probe of our features.* <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/imagegpt_architecture.png" alt="drawing" width="600"/> <small> Summary of the approach. Taken from the [original paper](https://cdn.openai.com/papers/Generative_Pretraining_from_Pixels_V2.pdf). </small> This model was contributed by [nielsr](https://huggingface.co/nielsr), based on [this issue](https://github.com/openai/image-gpt/issues/7). The original code can be found [here](https://github.com/openai/image-gpt). ## Usage tips - ImageGPT is almost exactly the same as [GPT-2](gpt2), with the exception that a different activation function is used (namely "quick gelu"), and the layer normalization layers don't mean center the inputs. ImageGPT also doesn't have tied input- and output embeddings. - As the time- and memory requirements of the attention mechanism of Transformers scales quadratically in the sequence length, the authors pre-trained ImageGPT on smaller input resolutions, such as 32x32 and 64x64. However, feeding a sequence of 32x32x3=3072 tokens from 0..255 into a Transformer is still prohibitively large. Therefore, the authors applied k-means clustering to the (R,G,B) pixel values with k=512. This way, we only have a 32*32 = 1024-long sequence, but now of integers in the range 0..511. So we are shrinking the sequence length at the cost of a bigger embedding matrix. 
In other words, the vocabulary size of ImageGPT is 512, + 1 for a special "start of sentence" (SOS) token, used at the beginning of every sequence. One can use [`ImageGPTImageProcessor`] to prepare images for the model. - Despite being pre-trained entirely unsupervised (i.e. without the use of any labels), ImageGPT produces fairly performant image features useful for downstream tasks, such as image classification. The authors showed that the features in the middle of the network are the most performant, and can be used as-is to train a linear model (such as a sklearn logistic regression model for example). This is also referred to as "linear probing". Features can be easily obtained by first forwarding the image through the model, then specifying `output_hidden_states=True`, and then average-pool the hidden states at whatever layer you like. - Alternatively, one can further fine-tune the entire model on a downstream dataset, similar to BERT. For this, you can use [`ImageGPTForImageClassification`]. - ImageGPT comes in different sizes: there's ImageGPT-small, ImageGPT-medium and ImageGPT-large. The authors did also train an XL variant, which they didn't release. The differences in size are summarized in the following table: | **Model variant** | **Depths** | **Hidden sizes** | **Decoder hidden size** | **Params (M)** | **ImageNet-1k Top 1** | |---|---|---|---|---|---| | MiT-b0 | [2, 2, 2, 2] | [32, 64, 160, 256] | 256 | 3.7 | 70.5 | | MiT-b1 | [2, 2, 2, 2] | [64, 128, 320, 512] | 256 | 14.0 | 78.7 | | MiT-b2 | [3, 4, 6, 3] | [64, 128, 320, 512] | 768 | 25.4 | 81.6 | | MiT-b3 | [3, 4, 18, 3] | [64, 128, 320, 512] | 768 | 45.2 | 83.1 | | MiT-b4 | [3, 8, 27, 3] | [64, 128, 320, 512] | 768 | 62.6 | 83.6 | | MiT-b5 | [3, 6, 40, 3] | [64, 128, 320, 512] | 768 | 82.0 | 83.8 | ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with ImageGPT. <PipelineTag pipeline="image-classification"/> - Demo notebooks for ImageGPT can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/ImageGPT). - [`ImageGPTForImageClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb). - See also: [Image classification task guide](../tasks/image_classification) If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource. ## ImageGPTConfig [[autodoc]] ImageGPTConfig ## ImageGPTFeatureExtractor [[autodoc]] ImageGPTFeatureExtractor - __call__ ## ImageGPTImageProcessor [[autodoc]] ImageGPTImageProcessor - preprocess ## ImageGPTModel [[autodoc]] ImageGPTModel - forward ## ImageGPTForCausalImageModeling [[autodoc]] ImageGPTForCausalImageModeling - forward ## ImageGPTForImageClassification [[autodoc]] ImageGPTForImageClassification - forward
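As a sketch of the "linear probing" recipe described above, the snippet below forwards one image through a small checkpoint, requests the hidden states, and average-pools one of the middle layers into a single feature vector. The checkpoint name and the choice of layer are illustrative; the paper reports that middle layers give the strongest features.

```python
import requests
import torch
from PIL import Image

from transformers import AutoImageProcessor, ImageGPTModel

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

processor = AutoImageProcessor.from_pretrained("openai/imagegpt-small")
model = ImageGPTModel.from_pretrained("openai/imagegpt-small")

# the processor color-quantizes the pixels, so the model input is a sequence of cluster ids
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs, output_hidden_states=True)

# average-pool a middle layer over the sequence dimension to get one feature vector per image
middle_layer = len(outputs.hidden_states) // 2
features = outputs.hidden_states[middle_layer].mean(dim=1)
print(features.shape)  # (batch_size, hidden_size)
```

These pooled features can then be used to fit a simple classifier, such as a scikit-learn logistic regression, as described above.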
transformers/docs/source/en/model_doc/imagegpt.md/0
{ "file_path": "transformers/docs/source/en/model_doc/imagegpt.md", "repo_id": "transformers", "token_count": 1915 }
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Nemotron ## Nemotron ### License The use of this model is governed by the [NVIDIA AI Foundation Models Community License Agreement](https://developer.nvidia.com/downloads/nv-ai-foundation-models-license). ### Description Nemotron-4 is a family of enterprise ready generative text models compatible with [NVIDIA NeMo Framework](https://www.nvidia.com/en-us/ai-data-science/generative-ai/nemo-framework/). NVIDIA NeMo is an end-to-end, cloud-native platform to build, customize, and deploy generative AI models anywhere. It includes training and inferencing frameworks, guardrailing toolkits, data curation tools, and pretrained models, offering enterprises an easy, cost-effective, and fast way to adopt generative AI. To get access to NeMo Framework, please sign up at [this link](https://developer.nvidia.com/nemo-framework/join). ### References [Announcement Blog](https://developer.nvidia.com/blog/nvidia-ai-foundation-models-build-custom-enterprise-chatbots-and-co-pilots-with-production-ready-llms/) ### Model Architecture **Architecture Type:** Transformer **Network Architecture:** Transformer Decoder (auto-regressive language model). ## Minitron ### Minitron 4B Base Minitron is a family of small language models (SLMs) obtained by pruning NVIDIA's [Nemotron-4 15B](https://arxiv.org/abs/2402.16819) model. We prune model embedding size, attention heads, and MLP intermediate dimension, following which, we perform continued training with distillation to arrive at the final models. Deriving the Minitron 8B and 4B models from the base 15B model using our approach requires up to **40x fewer training tokens** per model compared to training from scratch; this results in **compute cost savings of 1.8x** for training the full model family (15B, 8B, and 4B). Minitron models exhibit up to a 16% improvement in MMLU scores compared to training from scratch, perform comparably to other community models such as Mistral 7B, Gemma 7B and Llama-3 8B, and outperform state-of-the-art compression techniques from the literature. Please refer to our [arXiv paper](https://arxiv.org/abs/2407.14679) for more details. Minitron models are for research and development only. ### HuggingFace Quickstart The following code provides an example of how to load the Minitron-4B model and use it to perform text generation. 
```python import torch from transformers import AutoTokenizer, AutoModelForCausalLM # Load the tokenizer and model model_path = 'nvidia/Minitron-4B-Base' tokenizer = AutoTokenizer.from_pretrained(model_path) device = 'cuda' dtype = torch.bfloat16 model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype=dtype, device_map=device) # Prepare the input text prompt = 'Complete the paragraph: our solar system is' inputs = tokenizer.encode(prompt, return_tensors='pt').to(model.device) # Generate the output outputs = model.generate(inputs, max_length=20) # Decode and print the output output_text = tokenizer.decode(outputs[0]) print(output_text) ``` ### License Minitron is released under the [NVIDIA Open Model License Agreement](https://developer.download.nvidia.com/licenses/nvidia-open-model-license-agreement-june-2024.pdf). ### Evaluation Results *5-shot performance.* Language Understanding evaluated using [Massive Multitask Language Understanding](https://arxiv.org/abs/2009.03300): | Average | | :---- | | 58.6 | *Zero-shot performance.* Evaluated using select datasets from the [LM Evaluation Harness](https://github.com/EleutherAI/lm-evaluation-harness) with additions: | HellaSwag | Winogrande | GSM8K| ARC-C | XLSum | | :------------- | :------------- | :------------- | :------------- | :------------- | | 75.0 | 74.0 | 24.1 | 50.9 | 29.5 *Code generation performance*. Evaluated using [HumanEval](https://github.com/openai/human-eval): | p@1, 0-Shot | | :------------- | | 23.3 | Please refer to our [paper](https://arxiv.org/abs/2407.14679) for the full set of results. ### Citation If you find our work helpful, please consider citing our paper: ``` @article{minitron2024, title={Compact Language Models via Pruning and Knowledge Distillation}, author={Saurav Muralidharan and Sharath Turuvekere Sreenivas and Raviraj Joshi and Marcin Chochowski and Mostofa Patwary and Mohammad Shoeybi and Bryan Catanzaro and Jan Kautz and Pavlo Molchanov}, journal={arXiv preprint arXiv:2407.14679}, year={2024}, url={https://arxiv.org/abs/2407.14679}, } ``` ## NemotronConfig [[autodoc]] NemotronConfig ## NemotronModel [[autodoc]] NemotronModel - forward ## NemotronForCausalLM [[autodoc]] NemotronForCausalLM - forward ## NemotronForSequenceClassification [[autodoc]] NemotronForSequenceClassification - forward ## NemotronForQuestionAnswering [[autodoc]] NemotronForQuestionAnswering - forward ## NemotronForTokenClassification [[autodoc]] NemotronForTokenClassification - forward
transformers/docs/source/en/model_doc/nemotron.md/0
{ "file_path": "transformers/docs/source/en/model_doc/nemotron.md", "repo_id": "transformers", "token_count": 1671 }
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # PaliGemma ## Overview The PaliGemma model was proposed in [PaliGemma – Google's Cutting-Edge Open Vision Language Model](https://huggingface.co/blog/paligemma) by Google. It is a 3B vision-language model composed by a [SigLIP](siglip) vision encoder and a [Gemma](gemma) language decoder linked by a multimodal linear projection. It cuts an image into a fixed number of VIT tokens and prepends it to an optional prompt. One particularity is that the model uses full block attention on all the image tokens plus the input text tokens. It comes in 3 resolutions, 224x224, 448x448 and 896x896 with 3 base models, with 55 fine-tuned versions for different tasks, and 2 mix models. <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/paligemma/paligemma_arch.png" alt="drawing" width="600"/> <small> PaliGemma architecture. Taken from the <a href="https://huggingface.co/blog/paligemma">blog post.</a> </small> This model was contributed by [Molbap](https://huggingface.co/Molbap). ## Usage tips - PaliGemma is not meant for conversational use, and it works best when fine-tuning to a specific use case. Some downstream tasks on which PaliGemma can be fine-tuned include image captioning, visual question answering (VQA), object detection, referring expression segmentation and document understanding. - One can use `PaliGemmaProcessor` to prepare images, text and optional labels for the model. When fine-tuning a PaliGemma model, the `suffix` argument can be passed to the processor which creates the `labels` for the model: ```python prompt = "What is on the flower?" answer = "a bee" inputs = processor(images=raw_image, text=prompt, suffix=answer, return_tensors="pt") ``` ## Usage Example The model can accept a single or multiple images. According to the [paper](https://arxiv.org/abs/2407.07726v1), the checkpoint PaliGemma can transfer to tasks which take multiple images as input. NLVR2 is one such task, which asks one question about two images, and requires looking at both to give the correct answer. Here's an example code for single and multi image inference. ### Single-image Inference ```python from transformers import AutoProcessor, PaliGemmaForConditionalGeneration model_id = "google/paligemma-3b-mix-224" model = PaliGemmaForConditionalGeneration.from_pretrained(model_id) processor = AutoProcessor.from_pretrained(model_id) prompt = "What is on the flower?" 
from PIL import Image  # pillow and requests are needed to download and open the example image
import requests

image_file = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/bee.jpg?download=true"
raw_image = Image.open(requests.get(image_file, stream=True).raw)
inputs = processor(raw_image, prompt, return_tensors="pt")

output = model.generate(**inputs, max_new_tokens=20)
print(processor.decode(output[0], skip_special_tokens=True)[inputs.input_ids.shape[1]: ])
```

### Multi-image Inference

```python
import requests
from PIL import Image

from transformers import PaliGemmaForConditionalGeneration, PaliGemmaProcessor

model_id = "google/paligemma-3b-ft-nlvr2-448"  # checkpoint tuned for multiple images
model = PaliGemmaForConditionalGeneration.from_pretrained(model_id)
processor = PaliGemmaProcessor.from_pretrained(model_id)

prompt = "answer en Which of the two pictures shows a snowman, first or second?"
stop_sign_image = Image.open(
    requests.get("https://www.ilankelman.org/stopsigns/australia.jpg", stream=True).raw
)
snow_image = Image.open(
    requests.get(
        "https://huggingface.co/microsoft/kosmos-2-patch14-224/resolve/main/snowman.jpg", stream=True
    ).raw
)

inputs = processor(images=[[snow_image, stop_sign_image]], text=prompt, return_tensors="pt")

output = model.generate(**inputs, max_new_tokens=20)
print(processor.decode(output[0], skip_special_tokens=True)[inputs.input_ids.shape[1]: ])
```

## Resources

A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with PaliGemma. If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource.

- A blog post introducing all the features of PaliGemma can be found [here](https://huggingface.co/blog/paligemma).
- Demo notebooks on how to fine-tune PaliGemma for VQA with the Trainer API along with inference can be found [here](https://github.com/huggingface/notebooks/tree/main/examples/paligemma).
- Demo notebooks on how to fine-tune PaliGemma on a custom dataset (receipt image -> JSON) along with inference can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/PaliGemma). 🌎

## PaliGemmaConfig

[[autodoc]] PaliGemmaConfig

## PaliGemmaProcessor

[[autodoc]] PaliGemmaProcessor

## PaliGemmaForConditionalGeneration

[[autodoc]] PaliGemmaForConditionalGeneration
    - forward
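To make the fine-tuning setup from the usage tips concrete, the sketch below passes the `suffix` through the processor so that `labels` are created, and reads back the resulting loss. It reuses the `model`, `processor` and `raw_image` from the single-image example and is only meant to show the shape of a training step, not a full training loop.

```python
prompt = "What is on the flower?"
answer = "a bee"

# `suffix` makes the processor build `labels` aligned with the prompt + answer sequence
inputs = processor(images=raw_image, text=prompt, suffix=answer, return_tensors="pt")

outputs = model(**inputs)
loss = outputs.loss
loss.backward()  # in a real fine-tuning loop this would be followed by an optimizer step
```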
transformers/docs/source/en/model_doc/paligemma.md/0
{ "file_path": "transformers/docs/source/en/model_doc/paligemma.md", "repo_id": "transformers", "token_count": 1694 }
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # ProphetNet <div class="flex flex-wrap space-x-1"> <a href="https://huggingface.co/models?filter=prophetnet"> <img alt="Models" src="https://img.shields.io/badge/All_model_pages-prophetnet-blueviolet"> </a> <a href="https://huggingface.co/spaces/docs-demos/prophetnet-large-uncased"> <img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue"> </a> </div> ## Overview The ProphetNet model was proposed in [ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training,](https://arxiv.org/abs/2001.04063) by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang, Ming Zhou on 13 Jan, 2020. ProphetNet is an encoder-decoder model and can predict n-future tokens for "ngram" language modeling instead of just the next token. The abstract from the paper is the following: *In this paper, we present a new sequence-to-sequence pretraining model called ProphetNet, which introduces a novel self-supervised objective named future n-gram prediction and the proposed n-stream self-attention mechanism. Instead of the optimization of one-step ahead prediction in traditional sequence-to-sequence model, the ProphetNet is optimized by n-step ahead prediction which predicts the next n tokens simultaneously based on previous context tokens at each time step. The future n-gram prediction explicitly encourages the model to plan for the future tokens and prevent overfitting on strong local correlations. We pre-train ProphetNet using a base scale dataset (16GB) and a large scale dataset (160GB) respectively. Then we conduct experiments on CNN/DailyMail, Gigaword, and SQuAD 1.1 benchmarks for abstractive summarization and question generation tasks. Experimental results show that ProphetNet achieves new state-of-the-art results on all these datasets compared to the models using the same scale pretraining corpus.* The Authors' code can be found [here](https://github.com/microsoft/ProphetNet). ## Usage tips - ProphetNet is a model with absolute position embeddings so it's usually advised to pad the inputs on the right rather than the left. - The model architecture is based on the original Transformer, but replaces the “standard” self-attention mechanism in the decoder by a main self-attention mechanism and a self and n-stream (predict) self-attention mechanism. 
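ProphetNet checkpoints are used like any other encoder-decoder model in the library. A minimal sketch with the large uncased checkpoint (the input text is arbitrary and the checkpoint name is only an example):

```python
from transformers import ProphetNetForConditionalGeneration, ProphetNetTokenizer

tokenizer = ProphetNetTokenizer.from_pretrained("microsoft/prophetnet-large-uncased")
model = ProphetNetForConditionalGeneration.from_pretrained("microsoft/prophetnet-large-uncased")

inputs = tokenizer(
    "ProphetNet predicts future n-grams instead of only the next token.", return_tensors="pt"
)
decoder_inputs = tokenizer("prophetnet is", return_tensors="pt")

# a single forward pass; for text generation you would call `model.generate(...)` instead
outputs = model(input_ids=inputs.input_ids, decoder_input_ids=decoder_inputs.input_ids)
print(outputs.logits.shape)  # (batch_size, decoder_sequence_length, vocab_size)
```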
## Resources - [Causal language modeling task guide](../tasks/language_modeling) - [Translation task guide](../tasks/translation) - [Summarization task guide](../tasks/summarization) ## ProphetNetConfig [[autodoc]] ProphetNetConfig ## ProphetNetTokenizer [[autodoc]] ProphetNetTokenizer ## ProphetNet specific outputs [[autodoc]] models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput [[autodoc]] models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqModelOutput [[autodoc]] models.prophetnet.modeling_prophetnet.ProphetNetDecoderModelOutput [[autodoc]] models.prophetnet.modeling_prophetnet.ProphetNetDecoderLMOutput ## ProphetNetModel [[autodoc]] ProphetNetModel - forward ## ProphetNetEncoder [[autodoc]] ProphetNetEncoder - forward ## ProphetNetDecoder [[autodoc]] ProphetNetDecoder - forward ## ProphetNetForConditionalGeneration [[autodoc]] ProphetNetForConditionalGeneration - forward ## ProphetNetForCausalLM [[autodoc]] ProphetNetForCausalLM - forward
transformers/docs/source/en/model_doc/prophetnet.md/0
{ "file_path": "transformers/docs/source/en/model_doc/prophetnet.md", "repo_id": "transformers", "token_count": 1169 }
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # RetriBERT <Tip warning={true}> This model is in maintenance mode only, so we won't accept any new PRs changing its code. If you run into any issues running this model, please reinstall the last version that supported this model: v4.30.0. You can do so by running the following command: `pip install -U transformers==4.30.0`. </Tip> ## Overview The RetriBERT model was proposed in the blog post [Explain Anything Like I'm Five: A Model for Open Domain Long Form Question Answering](https://yjernite.github.io/lfqa.html). RetriBERT is a small model that uses either a single or pair of BERT encoders with lower-dimension projection for dense semantic indexing of text. This model was contributed by [yjernite](https://huggingface.co/yjernite). Code to train and use the model can be found [here](https://github.com/huggingface/transformers/tree/main/examples/research-projects/distillation). ## RetriBertConfig [[autodoc]] RetriBertConfig ## RetriBertTokenizer [[autodoc]] RetriBertTokenizer ## RetriBertTokenizerFast [[autodoc]] RetriBertTokenizerFast ## RetriBertModel [[autodoc]] RetriBertModel - forward
transformers/docs/source/en/model_doc/retribert.md/0
{ "file_path": "transformers/docs/source/en/model_doc/retribert.md", "repo_id": "transformers", "token_count": 536 }
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Speech Encoder Decoder Models The [`SpeechEncoderDecoderModel`] can be used to initialize a speech-to-text model with any pretrained speech autoencoding model as the encoder (*e.g.* [Wav2Vec2](wav2vec2), [Hubert](hubert)) and any pretrained autoregressive model as the decoder. The effectiveness of initializing speech-sequence-to-text-sequence models with pretrained checkpoints for speech recognition and speech translation has *e.g.* been shown in [Large-Scale Self- and Semi-Supervised Learning for Speech Translation](https://arxiv.org/abs/2104.06678) by Changhan Wang, Anne Wu, Juan Pino, Alexei Baevski, Michael Auli, Alexis Conneau. An example of how to use a [`SpeechEncoderDecoderModel`] for inference can be seen in [Speech2Text2](speech_to_text_2). ## Randomly initializing `SpeechEncoderDecoderModel` from model configurations. [`SpeechEncoderDecoderModel`] can be randomly initialized from an encoder and a decoder config. In the following example, we show how to do this using the default [`Wav2Vec2Model`] configuration for the encoder and the default [`BertForCausalLM`] configuration for the decoder. ```python >>> from transformers import BertConfig, Wav2Vec2Config, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel >>> config_encoder = Wav2Vec2Config() >>> config_decoder = BertConfig() >>> config = SpeechEncoderDecoderConfig.from_encoder_decoder_configs(config_encoder, config_decoder) >>> model = SpeechEncoderDecoderModel(config=config) ``` ## Initialising `SpeechEncoderDecoderModel` from a pretrained encoder and a pretrained decoder. [`SpeechEncoderDecoderModel`] can be initialized from a pretrained encoder checkpoint and a pretrained decoder checkpoint. Note that any pretrained Transformer-based speech model, *e.g.* [Wav2Vec2](wav2vec2), [Hubert](hubert) can serve as the encoder and both pretrained auto-encoding models, *e.g.* BERT, pretrained causal language models, *e.g.* GPT2, as well as the pretrained decoder part of sequence-to-sequence models, *e.g.* decoder of BART, can be used as the decoder. Depending on which architecture you choose as the decoder, the cross-attention layers might be randomly initialized. Initializing [`SpeechEncoderDecoderModel`] from a pretrained encoder and decoder checkpoint requires the model to be fine-tuned on a downstream task, as has been shown in [the *Warm-starting-encoder-decoder blog post*](https://huggingface.co/blog/warm-starting-encoder-decoder). To do so, the `SpeechEncoderDecoderModel` class provides a [`SpeechEncoderDecoderModel.from_encoder_decoder_pretrained`] method. ```python >>> from transformers import SpeechEncoderDecoderModel >>> model = SpeechEncoderDecoderModel.from_encoder_decoder_pretrained( ... "facebook/hubert-large-ll60k", "google-bert/bert-base-uncased" ... 
)
```

## Loading an existing `SpeechEncoderDecoderModel` checkpoint and performing inference.

To load fine-tuned checkpoints of the `SpeechEncoderDecoderModel` class, [`SpeechEncoderDecoderModel`] provides the `from_pretrained(...)` method just like any other model architecture in Transformers.

To perform inference, one uses the [`generate`] method, which allows you to autoregressively generate text. This method supports various forms of decoding, such as greedy, beam search and multinomial sampling.

```python
>>> from transformers import Wav2Vec2Processor, SpeechEncoderDecoderModel
>>> from datasets import load_dataset
>>> import torch

>>> # load a fine-tuned speech translation model and corresponding processor
>>> model = SpeechEncoderDecoderModel.from_pretrained("facebook/wav2vec2-xls-r-300m-en-to-15")
>>> processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-xls-r-300m-en-to-15")

>>> # let's perform inference on a piece of English speech (which we'll translate to German)
>>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
>>> input_values = processor(ds[0]["audio"]["array"], return_tensors="pt").input_values

>>> # autoregressively generate transcription (uses greedy decoding by default)
>>> generated_ids = model.generate(input_values)
>>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
>>> print(generated_text)
Mr. Quilter ist der Apostel der Mittelschicht und wir freuen uns, sein Evangelium willkommen heißen zu können.
```

## Training

Once the model is created, it can be fine-tuned similarly to BART, T5 or any other encoder-decoder model on a dataset of (speech, text) pairs.
As you can see, only 2 inputs are required for the model in order to compute a loss: `input_values` (which are the speech inputs) and `labels` (which are the `input_ids` of the encoded target sequence).

```python
>>> from transformers import AutoTokenizer, AutoFeatureExtractor, SpeechEncoderDecoderModel
>>> from datasets import load_dataset

>>> encoder_id = "facebook/wav2vec2-base-960h"  # acoustic model encoder
>>> decoder_id = "google-bert/bert-base-uncased"  # text decoder

>>> feature_extractor = AutoFeatureExtractor.from_pretrained(encoder_id)
>>> tokenizer = AutoTokenizer.from_pretrained(decoder_id)
>>> # Combine pre-trained encoder and pre-trained decoder to form a Seq2Seq model
>>> model = SpeechEncoderDecoderModel.from_encoder_decoder_pretrained(encoder_id, decoder_id)

>>> model.config.decoder_start_token_id = tokenizer.cls_token_id
>>> model.config.pad_token_id = tokenizer.pad_token_id

>>> # load an audio input and pre-process (normalise mean/std to 0/1)
>>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
>>> input_values = feature_extractor(ds[0]["audio"]["array"], return_tensors="pt").input_values

>>> # load its corresponding transcription and tokenize to generate labels
>>> labels = tokenizer(ds[0]["text"], return_tensors="pt").input_ids

>>> # the forward function automatically creates the correct decoder_input_ids
>>> loss = model(input_values=input_values, labels=labels).loss
>>> loss.backward()
```

## SpeechEncoderDecoderConfig

[[autodoc]] SpeechEncoderDecoderConfig

## SpeechEncoderDecoderModel

[[autodoc]] SpeechEncoderDecoderModel
    - forward
    - from_encoder_decoder_pretrained

## FlaxSpeechEncoderDecoderModel

[[autodoc]] FlaxSpeechEncoderDecoderModel
    - __call__
    - from_encoder_decoder_pretrained
transformers/docs/source/en/model_doc/speech-encoder-decoder.md/0
{ "file_path": "transformers/docs/source/en/model_doc/speech-encoder-decoder.md", "repo_id": "transformers", "token_count": 2092 }
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # T5v1.1 ## Overview T5v1.1 was released in the [google-research/text-to-text-transfer-transformer](https://github.com/google-research/text-to-text-transfer-transformer/blob/main/released_checkpoints.md#t511) repository by Colin Raffel et al. It's an improved version of the original T5 model. This model was contributed by [patrickvonplaten](https://huggingface.co/patrickvonplaten). The original code can be found [here](https://github.com/google-research/text-to-text-transfer-transformer/blob/main/released_checkpoints.md#t511). ## Usage tips One can directly plug in the weights of T5v1.1 into a T5 model, like so: ```python >>> from transformers import T5ForConditionalGeneration >>> model = T5ForConditionalGeneration.from_pretrained("google/t5-v1_1-base") ``` T5 Version 1.1 includes the following improvements compared to the original T5 model: - GEGLU activation in the feed-forward hidden layer, rather than ReLU. See [this paper](https://arxiv.org/abs/2002.05202). - Dropout was turned off in pre-training (quality win). Dropout should be re-enabled during fine-tuning. - Pre-trained on C4 only without mixing in the downstream tasks. - No parameter sharing between the embedding and classifier layer. - "xl" and "xxl" replace "3B" and "11B". The model shapes are a bit different - larger `d_model` and smaller `num_heads` and `d_ff`. Note: T5 Version 1.1 was only pre-trained on [C4](https://huggingface.co/datasets/c4) excluding any supervised training. Therefore, this model has to be fine-tuned before it is usable on a downstream task, unlike the original T5 model. Since t5v1.1 was pre-trained unsupervisedly, there's no real advantage to using a task prefix during single-task fine-tuning. If you are doing multi-task fine-tuning, you should use a prefix. Google has released the following variants: - [google/t5-v1_1-small](https://huggingface.co/google/t5-v1_1-small) - [google/t5-v1_1-base](https://huggingface.co/google/t5-v1_1-base) - [google/t5-v1_1-large](https://huggingface.co/google/t5-v1_1-large) - [google/t5-v1_1-xl](https://huggingface.co/google/t5-v1_1-xl) - [google/t5-v1_1-xxl](https://huggingface.co/google/t5-v1_1-xxl). <Tip> Refer to [T5's documentation page](t5) for all API reference, tips, code examples and notebooks. </Tip>
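Because the released checkpoints are only pre-trained with the span-corruption objective (see the note above), a quick sanity check before fine-tuning is to compute that unsupervised denoising loss directly. The sketch below follows the standard T5 sentinel-token format; the example sentences are purely illustrative:

```python
from transformers import AutoTokenizer, T5ForConditionalGeneration

tokenizer = AutoTokenizer.from_pretrained("google/t5-v1_1-base")
model = T5ForConditionalGeneration.from_pretrained("google/t5-v1_1-base")

# span corruption: sentinel tokens (<extra_id_0>, <extra_id_1>, ...) mark the masked spans
input_ids = tokenizer("The <extra_id_0> walks in <extra_id_1> park", return_tensors="pt").input_ids
labels = tokenizer("<extra_id_0> cute dog <extra_id_1> the <extra_id_2>", return_tensors="pt").input_ids

# the model returns the denoising loss when labels are provided
loss = model(input_ids=input_ids, labels=labels).loss
print(loss.item())
```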
transformers/docs/source/en/model_doc/t5v1.1.md/0
{ "file_path": "transformers/docs/source/en/model_doc/t5v1.1.md", "repo_id": "transformers", "token_count": 967 }
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # UniSpeech-SAT ## Overview The UniSpeech-SAT model was proposed in [UniSpeech-SAT: Universal Speech Representation Learning with Speaker Aware Pre-Training](https://arxiv.org/abs/2110.05752) by Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu . The abstract from the paper is the following: *Self-supervised learning (SSL) is a long-standing goal for speech processing, since it utilizes large-scale unlabeled data and avoids extensive human labeling. Recent years witness great successes in applying self-supervised learning in speech recognition, while limited exploration was attempted in applying SSL for modeling speaker characteristics. In this paper, we aim to improve the existing SSL framework for speaker representation learning. Two methods are introduced for enhancing the unsupervised speaker information extraction. First, we apply the multi-task learning to the current SSL framework, where we integrate the utterance-wise contrastive loss with the SSL objective function. Second, for better speaker discrimination, we propose an utterance mixing strategy for data augmentation, where additional overlapped utterances are created unsupervisedly and incorporate during training. We integrate the proposed methods into the HuBERT framework. Experiment results on SUPERB benchmark show that the proposed system achieves state-of-the-art performance in universal representation learning, especially for speaker identification oriented tasks. An ablation study is performed verifying the efficacy of each proposed method. Finally, we scale up training dataset to 94 thousand hours public audio data and achieve further performance improvement in all SUPERB tasks.* This model was contributed by [patrickvonplaten](https://huggingface.co/patrickvonplaten). The Authors' code can be found [here](https://github.com/microsoft/UniSpeech/tree/main/UniSpeech-SAT). ## Usage tips - UniSpeechSat is a speech model that accepts a float array corresponding to the raw waveform of the speech signal. Please use [`Wav2Vec2Processor`] for the feature extraction. - UniSpeechSat model can be fine-tuned using connectionist temporal classification (CTC) so the model output has to be decoded using [`Wav2Vec2CTCTokenizer`]. - UniSpeechSat performs especially well on speaker verification, speaker identification, and speaker diarization tasks. 
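As a concrete illustration of the tips above, a minimal CTC transcription sketch might look like the following. Note that the checkpoint name used here is an assumption — substitute any CTC-fine-tuned UniSpeechSat checkpoint you have access to:

```python
import torch
from datasets import load_dataset
from transformers import Wav2Vec2Processor, UniSpeechSatForCTC

# assumed CTC fine-tuned checkpoint; replace with the one you are using
checkpoint = "microsoft/unispeech-sat-base-100h-libri-ft"
processor = Wav2Vec2Processor.from_pretrained(checkpoint)
model = UniSpeechSatForCTC.from_pretrained(checkpoint)

# load a short 16 kHz speech sample as a float array
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
inputs = processor(ds[0]["audio"]["array"], sampling_rate=16_000, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits

# greedy CTC decoding with the Wav2Vec2 CTC tokenizer
predicted_ids = torch.argmax(logits, dim=-1)
transcription = processor.batch_decode(predicted_ids)[0]
print(transcription)
```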
## Resources - [Audio classification task guide](../tasks/audio_classification) - [Automatic speech recognition task guide](../tasks/asr) ## UniSpeechSatConfig [[autodoc]] UniSpeechSatConfig ## UniSpeechSat specific outputs [[autodoc]] models.unispeech_sat.modeling_unispeech_sat.UniSpeechSatForPreTrainingOutput ## UniSpeechSatModel [[autodoc]] UniSpeechSatModel - forward ## UniSpeechSatForCTC [[autodoc]] UniSpeechSatForCTC - forward ## UniSpeechSatForSequenceClassification [[autodoc]] UniSpeechSatForSequenceClassification - forward ## UniSpeechSatForAudioFrameClassification [[autodoc]] UniSpeechSatForAudioFrameClassification - forward ## UniSpeechSatForXVector [[autodoc]] UniSpeechSatForXVector - forward ## UniSpeechSatForPreTraining [[autodoc]] UniSpeechSatForPreTraining - forward
transformers/docs/source/en/model_doc/unispeech-sat.md/0
{ "file_path": "transformers/docs/source/en/model_doc/unispeech-sat.md", "repo_id": "transformers", "token_count": 1045 }
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # ViTDet ## Overview The ViTDet model was proposed in [Exploring Plain Vision Transformer Backbones for Object Detection](https://arxiv.org/abs/2203.16527) by Yanghao Li, Hanzi Mao, Ross Girshick, Kaiming He. VitDet leverages the plain [Vision Transformer](vit) for the task of object detection. The abstract from the paper is the following: *We explore the plain, non-hierarchical Vision Transformer (ViT) as a backbone network for object detection. This design enables the original ViT architecture to be fine-tuned for object detection without needing to redesign a hierarchical backbone for pre-training. With minimal adaptations for fine-tuning, our plain-backbone detector can achieve competitive results. Surprisingly, we observe: (i) it is sufficient to build a simple feature pyramid from a single-scale feature map (without the common FPN design) and (ii) it is sufficient to use window attention (without shifting) aided with very few cross-window propagation blocks. With plain ViT backbones pre-trained as Masked Autoencoders (MAE), our detector, named ViTDet, can compete with the previous leading methods that were all based on hierarchical backbones, reaching up to 61.3 AP_box on the COCO dataset using only ImageNet-1K pre-training. We hope our study will draw attention to research on plain-backbone detectors.* This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/facebookresearch/detectron2/tree/main/projects/ViTDet). Tips: - At the moment, only the backbone is available. ## VitDetConfig [[autodoc]] VitDetConfig ## VitDetModel [[autodoc]] VitDetModel - forward
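Since only the backbone is available (see the tip above), a quick way to experiment is to instantiate it from a configuration and inspect the feature map it produces. The sketch below uses randomly initialized weights, so the outputs are only meaningful for checking shapes:

```python
import torch
from transformers import VitDetConfig, VitDetModel

# build a randomly initialized backbone from the default configuration
config = VitDetConfig()
model = VitDetModel(config)

# dummy image batch; the spatial size should match config.image_size
pixel_values = torch.randn(1, 3, 224, 224)

with torch.no_grad():
    outputs = model(pixel_values)

# the backbone returns a spatial feature map rather than a sequence of tokens
print(outputs.last_hidden_state.shape)
```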
transformers/docs/source/en/model_doc/vitdet.md/0
{ "file_path": "transformers/docs/source/en/model_doc/vitdet.md", "repo_id": "transformers", "token_count": 579 }
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # XLM-V ## Overview XLM-V is multilingual language model with a one million token vocabulary trained on 2.5TB of data from Common Crawl (same as XLM-R). It was introduced in the [XLM-V: Overcoming the Vocabulary Bottleneck in Multilingual Masked Language Models](https://arxiv.org/abs/2301.10472) paper by Davis Liang, Hila Gonen, Yuning Mao, Rui Hou, Naman Goyal, Marjan Ghazvininejad, Luke Zettlemoyer and Madian Khabsa. From the abstract of the XLM-V paper: *Large multilingual language models typically rely on a single vocabulary shared across 100+ languages. As these models have increased in parameter count and depth, vocabulary size has remained largely unchanged. This vocabulary bottleneck limits the representational capabilities of multilingual models like XLM-R. In this paper, we introduce a new approach for scaling to very large multilingual vocabularies by de-emphasizing token sharing between languages with little lexical overlap and assigning vocabulary capacity to achieve sufficient coverage for each individual language. Tokenizations using our vocabulary are typically more semantically meaningful and shorter compared to XLM-R. Leveraging this improved vocabulary, we train XLM-V, a multilingual language model with a one million token vocabulary. XLM-V outperforms XLM-R on every task we tested on ranging from natural language inference (XNLI), question answering (MLQA, XQuAD, TyDiQA), and named entity recognition (WikiAnn) to low-resource tasks (Americas NLI, MasakhaNER).* This model was contributed by [stefan-it](https://huggingface.co/stefan-it), including detailed experiments with XLM-V on downstream tasks. The experiments repository can be found [here](https://github.com/stefan-it/xlm-v-experiments). ## Usage tips - XLM-V is compatible with the XLM-RoBERTa model architecture, only model weights from [`fairseq`](https://github.com/facebookresearch/fairseq) library had to be converted. - The `XLMTokenizer` implementation is used to load the vocab and performs tokenization. A XLM-V (base size) model is available under the [`facebook/xlm-v-base`](https://huggingface.co/facebook/xlm-v-base) identifier. <Tip> XLM-V architecture is the same as XLM-RoBERTa, refer to [XLM-RoBERTa documentation](xlm-roberta) for API reference, and examples. </Tip>
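As a quick sanity check, the base checkpoint mentioned above can be used through the standard pipeline API. The sketch below assumes the checkpoint exposes the XLM-RoBERTa-style `<mask>` token for masked language modeling:

```python
from transformers import pipeline

# masked language modeling with the base checkpoint
unmasker = pipeline("fill-mask", model="facebook/xlm-v-base")
print(unmasker("Paris is the <mask> of France.")[0]["token_str"])
```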
transformers/docs/source/en/model_doc/xlm-v.md/0
{ "file_path": "transformers/docs/source/en/model_doc/xlm-v.md", "repo_id": "transformers", "token_count": 809 }
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Image Feature Extraction

[[open-in-colab]]

Image feature extraction is the task of extracting semantically meaningful features given an image. This has many use cases, including image similarity and image retrieval. Moreover, most computer vision models can be used for image feature extraction, where one can remove the task-specific head (image classification, object detection, etc.) and get the features. These features are very useful at a higher level: edge detection, corner detection, and so on. They may also contain information about the real world (e.g. what a cat looks like) depending on how deep the model is. Therefore, these outputs can be used to train new classifiers on a specific dataset.

In this guide, you will:

- Learn to build a simple image similarity system on top of the `image-feature-extraction` pipeline.
- Accomplish the same task with bare model inference.

## Image Similarity using `image-feature-extraction` Pipeline

We have two images of cats sitting on top of fish nets; one of them is generated.

```python
from PIL import Image
import requests

img_urls = ["https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/cats.png", "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/cats.jpeg"]
image_real = Image.open(requests.get(img_urls[0], stream=True).raw).convert("RGB")
image_gen = Image.open(requests.get(img_urls[1], stream=True).raw).convert("RGB")
```

Let's see the pipeline in action. First, initialize the pipeline. If you don't pass any model to it, the pipeline will be automatically initialized with [google/vit-base-patch16-224](https://huggingface.co/google/vit-base-patch16-224). If you'd like to calculate similarity, set `pool` to `True`.

```python
import torch
from transformers import pipeline
from accelerate.test_utils.testing import get_backend

# automatically detects the underlying device type (CUDA, CPU, XPU, MPS, etc.)
DEVICE, _, _ = get_backend()
pipe = pipeline(task="image-feature-extraction", model="google/vit-base-patch16-384", device=DEVICE, pool=True)
```

To infer with `pipe`, pass both images to it.

```python
outputs = pipe([image_real, image_gen])
```

The output contains pooled embeddings of those two images.

```python
# get the length of a single output
print(len(outputs[0][0]))

# show outputs
print(outputs)

# 768
# [[[-0.03909236937761307, 0.43381670117378235, -0.06913255900144577,
```

To get the similarity score, we need to pass them to a similarity function.
```python
from torch.nn.functional import cosine_similarity

similarity_score = cosine_similarity(torch.Tensor(outputs[0]),
                                     torch.Tensor(outputs[1]), dim=1)

print(similarity_score)

# tensor([0.6043])
```

If you want to get the last hidden states before pooling, avoid passing any value for the `pool` parameter, as it is set to `False` by default. These hidden states are useful for training new classifiers or models based on the features from the model.

```python
pipe = pipeline(task="image-feature-extraction", model="google/vit-base-patch16-224", device=DEVICE)
outputs = pipe(image_real)
```

Since the outputs are unpooled, we get the last hidden states where the first dimension is the batch size, and the last two are the embedding shape.

```python
import numpy as np
print(np.array(outputs).shape)
# (1, 197, 768)
```

## Getting Features and Similarities using `AutoModel`

We can also use the `AutoModel` class of Transformers to get the features. `AutoModel` loads any Transformers model with no task-specific head, and we can use this to get the features.

```python
from transformers import AutoImageProcessor, AutoModel

processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
model = AutoModel.from_pretrained("google/vit-base-patch16-224").to(DEVICE)
```

Let's write a simple function for inference. We will pass the inputs to the `processor` first and then pass its outputs to the `model`.

```python
def infer(image):
    # preprocess the image and move the tensors to the same device as the model
    inputs = processor(image, return_tensors="pt").to(DEVICE)
    outputs = model(**inputs)
    return outputs.pooler_output
```

We can pass the images directly to this function and get the embeddings.

```python
embed_real = infer(image_real)
embed_gen = infer(image_gen)
```

We can get the similarity again over the embeddings.

```python
from torch.nn.functional import cosine_similarity

similarity_score = cosine_similarity(embed_real, embed_gen, dim=1)
print(similarity_score)

# tensor([0.6061], device='cuda:0', grad_fn=<SumBackward1>)
```
transformers/docs/source/en/tasks/image_feature_extraction.md/0
{ "file_path": "transformers/docs/source/en/tasks/image_feature_extraction.md", "repo_id": "transformers", "token_count": 1562 }
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Text to speech [[open-in-colab]] Text-to-speech (TTS) is the task of creating natural-sounding speech from text, where the speech can be generated in multiple languages and for multiple speakers. Several text-to-speech models are currently available in 🤗 Transformers, such as [Bark](../model_doc/bark), [MMS](../model_doc/mms), [VITS](../model_doc/vits) and [SpeechT5](../model_doc/speecht5). You can easily generate audio using the `"text-to-audio"` pipeline (or its alias - `"text-to-speech"`). Some models, like Bark, can also be conditioned to generate non-verbal communications such as laughing, sighing and crying, or even add music. Here's an example of how you would use the `"text-to-speech"` pipeline with Bark: ```py >>> from transformers import pipeline >>> pipe = pipeline("text-to-speech", model="suno/bark-small") >>> text = "[clears throat] This is a test ... and I just took a long pause." >>> output = pipe(text) ``` Here's a code snippet you can use to listen to the resulting audio in a notebook: ```python >>> from IPython.display import Audio >>> Audio(output["audio"], rate=output["sampling_rate"]) ``` For more examples on what Bark and other pretrained TTS models can do, refer to our [Audio course](https://huggingface.co/learn/audio-course/chapter6/pre-trained_models). If you are looking to fine-tune a TTS model, the only text-to-speech models currently available in 🤗 Transformers are [SpeechT5](model_doc/speecht5) and [FastSpeech2Conformer](model_doc/fastspeech2_conformer), though more will be added in the future. SpeechT5 is pre-trained on a combination of speech-to-text and text-to-speech data, allowing it to learn a unified space of hidden representations shared by both text and speech. This means that the same pre-trained model can be fine-tuned for different tasks. Furthermore, SpeechT5 supports multiple speakers through x-vector speaker embeddings. The remainder of this guide illustrates how to: 1. Fine-tune [SpeechT5](../model_doc/speecht5) that was originally trained on English speech on the Dutch (`nl`) language subset of the [VoxPopuli](https://huggingface.co/datasets/facebook/voxpopuli) dataset. 2. Use your refined model for inference in one of two ways: using a pipeline or directly. Before you begin, make sure you have all the necessary libraries installed: ```bash pip install datasets soundfile speechbrain accelerate ``` Install 🤗Transformers from source as not all the SpeechT5 features have been merged into an official release yet: ```bash pip install git+https://github.com/huggingface/transformers.git ``` <Tip> To follow this guide you will need a GPU. 
If you're working in a notebook, run the following line to check if a GPU is available: ```bash !nvidia-smi ``` or alternatively for AMD GPUs: ```bash !rocm-smi ``` </Tip> We encourage you to log in to your Hugging Face account to upload and share your model with the community. When prompted, enter your token to log in: ```py >>> from huggingface_hub import notebook_login >>> notebook_login() ``` ## Load the dataset [VoxPopuli](https://huggingface.co/datasets/facebook/voxpopuli) is a large-scale multilingual speech corpus consisting of data sourced from 2009-2020 European Parliament event recordings. It contains labelled audio-transcription data for 15 European languages. In this guide, we are using the Dutch language subset, feel free to pick another subset. Note that VoxPopuli or any other automated speech recognition (ASR) dataset may not be the most suitable option for training TTS models. The features that make it beneficial for ASR, such as excessive background noise, are typically undesirable in TTS. However, finding top-quality, multilingual, and multi-speaker TTS datasets can be quite challenging. Let's load the data: ```py >>> from datasets import load_dataset, Audio >>> dataset = load_dataset("facebook/voxpopuli", "nl", split="train") >>> len(dataset) 20968 ``` 20968 examples should be sufficient for fine-tuning. SpeechT5 expects audio data to have a sampling rate of 16 kHz, so make sure the examples in the dataset meet this requirement: ```py dataset = dataset.cast_column("audio", Audio(sampling_rate=16000)) ``` ## Preprocess the data Let's begin by defining the model checkpoint to use and loading the appropriate processor: ```py >>> from transformers import SpeechT5Processor >>> checkpoint = "microsoft/speecht5_tts" >>> processor = SpeechT5Processor.from_pretrained(checkpoint) ``` ### Text cleanup for SpeechT5 tokenization Start by cleaning up the text data. You'll need the tokenizer part of the processor to process the text: ```py >>> tokenizer = processor.tokenizer ``` The dataset examples contain `raw_text` and `normalized_text` features. When deciding which feature to use as the text input, consider that the SpeechT5 tokenizer doesn't have any tokens for numbers. In `normalized_text` the numbers are written out as text. Thus, it is a better fit, and we recommend using `normalized_text` as input text. Because SpeechT5 was trained on the English language, it may not recognize certain characters in the Dutch dataset. If left as is, these characters will be converted to `<unk>` tokens. However, in Dutch, certain characters like `à` are used to stress syllables. In order to preserve the meaning of the text, we can replace this character with a regular `a`. To identify unsupported tokens, extract all unique characters in the dataset using the `SpeechT5Tokenizer` which works with characters as tokens. To do this, write the `extract_all_chars` mapping function that concatenates the transcriptions from all examples into one string and converts it to a set of characters. Make sure to set `batched=True` and `batch_size=-1` in `dataset.map()` so that all transcriptions are available at once for the mapping function. ```py >>> def extract_all_chars(batch): ... all_text = " ".join(batch["normalized_text"]) ... vocab = list(set(all_text)) ... return {"vocab": [vocab], "all_text": [all_text]} >>> vocabs = dataset.map( ... extract_all_chars, ... batched=True, ... batch_size=-1, ... keep_in_memory=True, ... remove_columns=dataset.column_names, ... 
) >>> dataset_vocab = set(vocabs["vocab"][0]) >>> tokenizer_vocab = {k for k, _ in tokenizer.get_vocab().items()} ``` Now you have two sets of characters: one with the vocabulary from the dataset and one with the vocabulary from the tokenizer. To identify any unsupported characters in the dataset, you can take the difference between these two sets. The resulting set will contain the characters that are in the dataset but not in the tokenizer. ```py >>> dataset_vocab - tokenizer_vocab {' ', 'à', 'ç', 'è', 'ë', 'í', 'ï', 'ö', 'ü'} ``` To handle the unsupported characters identified in the previous step, define a function that maps these characters to valid tokens. Note that spaces are already replaced by `▁` in the tokenizer and don't need to be handled separately. ```py >>> replacements = [ ... ("à", "a"), ... ("ç", "c"), ... ("è", "e"), ... ("ë", "e"), ... ("í", "i"), ... ("ï", "i"), ... ("ö", "o"), ... ("ü", "u"), ... ] >>> def cleanup_text(inputs): ... for src, dst in replacements: ... inputs["normalized_text"] = inputs["normalized_text"].replace(src, dst) ... return inputs >>> dataset = dataset.map(cleanup_text) ``` Now that you have dealt with special characters in the text, it's time to shift focus to the audio data. ### Speakers The VoxPopuli dataset includes speech from multiple speakers, but how many speakers are represented in the dataset? To determine this, we can count the number of unique speakers and the number of examples each speaker contributes to the dataset. With a total of 20,968 examples in the dataset, this information will give us a better understanding of the distribution of speakers and examples in the data. ```py >>> from collections import defaultdict >>> speaker_counts = defaultdict(int) >>> for speaker_id in dataset["speaker_id"]: ... speaker_counts[speaker_id] += 1 ``` By plotting a histogram you can get a sense of how much data there is for each speaker. ```py >>> import matplotlib.pyplot as plt >>> plt.figure() >>> plt.hist(speaker_counts.values(), bins=20) >>> plt.ylabel("Speakers") >>> plt.xlabel("Examples") >>> plt.show() ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/tts_speakers_histogram.png" alt="Speakers histogram"/> </div> The histogram reveals that approximately one-third of the speakers in the dataset have fewer than 100 examples, while around ten speakers have more than 500 examples. To improve training efficiency and balance the dataset, we can limit the data to speakers with between 100 and 400 examples. ```py >>> def select_speaker(speaker_id): ... return 100 <= speaker_counts[speaker_id] <= 400 >>> dataset = dataset.filter(select_speaker, input_columns=["speaker_id"]) ``` Let's check how many speakers remain: ```py >>> len(set(dataset["speaker_id"])) 42 ``` Let's see how many examples are left: ```py >>> len(dataset) 9973 ``` You are left with just under 10,000 examples from approximately 40 unique speakers, which should be sufficient. Note that some speakers with few examples may actually have more audio available if the examples are long. However, determining the total amount of audio for each speaker requires scanning through the entire dataset, which is a time-consuming process that involves loading and decoding each audio file. As such, we have chosen to skip this step here. ### Speaker embeddings To enable the TTS model to differentiate between multiple speakers, you'll need to create a speaker embedding for each example. 
The speaker embedding is an additional input into the model that captures a particular speaker's voice characteristics. To generate these speaker embeddings, use the pre-trained [spkrec-xvect-voxceleb](https://huggingface.co/speechbrain/spkrec-xvect-voxceleb) model from SpeechBrain. Create a function `create_speaker_embedding()` that takes an input audio waveform and outputs a 512-element vector containing the corresponding speaker embedding. ```py >>> import os >>> import torch >>> from speechbrain.inference.classifiers import EncoderClassifier >>> from accelerate.test_utils.testing import get_backend >>> spk_model_name = "speechbrain/spkrec-xvect-voxceleb" >>> device, _, _ = get_backend() # automatically detects the underlying device type (CUDA, CPU, XPU, MPS, etc.) >>> speaker_model = EncoderClassifier.from_hparams( ... source=spk_model_name, ... run_opts={"device": device}, ... savedir=os.path.join("/tmp", spk_model_name), ... ) >>> def create_speaker_embedding(waveform): ... with torch.no_grad(): ... speaker_embeddings = speaker_model.encode_batch(torch.tensor(waveform)) ... speaker_embeddings = torch.nn.functional.normalize(speaker_embeddings, dim=2) ... speaker_embeddings = speaker_embeddings.squeeze().cpu().numpy() ... return speaker_embeddings ``` It's important to note that the `speechbrain/spkrec-xvect-voxceleb` model was trained on English speech from the VoxCeleb dataset, whereas the training examples in this guide are in Dutch. While we believe that this model will still generate reasonable speaker embeddings for our Dutch dataset, this assumption may not hold true in all cases. For optimal results, we recommend training an X-vector model on the target speech first. This will ensure that the model is better able to capture the unique voice characteristics present in the Dutch language. ### Processing the dataset Finally, let's process the data into the format the model expects. Create a `prepare_dataset` function that takes in a single example and uses the `SpeechT5Processor` object to tokenize the input text and load the target audio into a log-mel spectrogram. It should also add the speaker embeddings as an additional input. ```py >>> def prepare_dataset(example): ... audio = example["audio"] ... example = processor( ... text=example["normalized_text"], ... audio_target=audio["array"], ... sampling_rate=audio["sampling_rate"], ... return_attention_mask=False, ... ) ... # strip off the batch dimension ... example["labels"] = example["labels"][0] ... # use SpeechBrain to obtain x-vector ... example["speaker_embeddings"] = create_speaker_embedding(audio["array"]) ... return example ``` Verify the processing is correct by looking at a single example: ```py >>> processed_example = prepare_dataset(dataset[0]) >>> list(processed_example.keys()) ['input_ids', 'labels', 'stop_labels', 'speaker_embeddings'] ``` Speaker embeddings should be a 512-element vector: ```py >>> processed_example["speaker_embeddings"].shape (512,) ``` The labels should be a log-mel spectrogram with 80 mel bins. 
```py >>> import matplotlib.pyplot as plt >>> plt.figure() >>> plt.imshow(processed_example["labels"].T) >>> plt.show() ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/tts_logmelspectrogram_1.png" alt="Log-mel spectrogram with 80 mel bins"/> </div> Side note: If you find this spectrogram confusing, it may be due to your familiarity with the convention of placing low frequencies at the bottom and high frequencies at the top of a plot. However, when plotting spectrograms as an image using the matplotlib library, the y-axis is flipped and the spectrograms appear upside down. Now apply the processing function to the entire dataset. This will take between 5 and 10 minutes. ```py >>> dataset = dataset.map(prepare_dataset, remove_columns=dataset.column_names) ``` You'll see a warning saying that some examples in the dataset are longer than the maximum input length the model can handle (600 tokens). Remove those examples from the dataset. Here we go even further and to allow for larger batch sizes we remove anything over 200 tokens. ```py >>> def is_not_too_long(input_ids): ... input_length = len(input_ids) ... return input_length < 200 >>> dataset = dataset.filter(is_not_too_long, input_columns=["input_ids"]) >>> len(dataset) 8259 ``` Next, create a basic train/test split: ```py >>> dataset = dataset.train_test_split(test_size=0.1) ``` ### Data collator In order to combine multiple examples into a batch, you need to define a custom data collator. This collator will pad shorter sequences with padding tokens, ensuring that all examples have the same length. For the spectrogram labels, the padded portions are replaced with the special value `-100`. This special value instructs the model to ignore that part of the spectrogram when calculating the spectrogram loss. ```py >>> from dataclasses import dataclass >>> from typing import Any, Dict, List, Union >>> @dataclass ... class TTSDataCollatorWithPadding: ... processor: Any ... def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]: ... input_ids = [{"input_ids": feature["input_ids"]} for feature in features] ... label_features = [{"input_values": feature["labels"]} for feature in features] ... speaker_features = [feature["speaker_embeddings"] for feature in features] ... # collate the inputs and targets into a batch ... batch = processor.pad(input_ids=input_ids, labels=label_features, return_tensors="pt") ... # replace padding with -100 to ignore loss correctly ... batch["labels"] = batch["labels"].masked_fill(batch.decoder_attention_mask.unsqueeze(-1).ne(1), -100) ... # not used during fine-tuning ... del batch["decoder_attention_mask"] ... # round down target lengths to multiple of reduction factor ... if model.config.reduction_factor > 1: ... target_lengths = torch.tensor([len(feature["input_values"]) for feature in label_features]) ... target_lengths = target_lengths.new( ... [length - length % model.config.reduction_factor for length in target_lengths] ... ) ... max_length = max(target_lengths) ... batch["labels"] = batch["labels"][:, :max_length] ... # also add in the speaker embeddings ... batch["speaker_embeddings"] = torch.tensor(speaker_features) ... return batch ``` In SpeechT5, the input to the decoder part of the model is reduced by a factor 2. In other words, it throws away every other timestep from the target sequence. The decoder then predicts a sequence that is twice as long. 
Since the original target sequence length may be odd, the data collator makes sure to round the maximum length of the batch down to be a multiple of 2. ```py >>> data_collator = TTSDataCollatorWithPadding(processor=processor) ``` ## Train the model Load the pre-trained model from the same checkpoint as you used for loading the processor: ```py >>> from transformers import SpeechT5ForTextToSpeech >>> model = SpeechT5ForTextToSpeech.from_pretrained(checkpoint) ``` The `use_cache=True` option is incompatible with gradient checkpointing. Disable it for training. ```py >>> model.config.use_cache = False ``` Define the training arguments. Here we are not computing any evaluation metrics during the training process. Instead, we'll only look at the loss: ```python >>> from transformers import Seq2SeqTrainingArguments >>> training_args = Seq2SeqTrainingArguments( ... output_dir="speecht5_finetuned_voxpopuli_nl", # change to a repo name of your choice ... per_device_train_batch_size=4, ... gradient_accumulation_steps=8, ... learning_rate=1e-5, ... warmup_steps=500, ... max_steps=4000, ... gradient_checkpointing=True, ... fp16=True, ... eval_strategy="steps", ... per_device_eval_batch_size=2, ... save_steps=1000, ... eval_steps=1000, ... logging_steps=25, ... report_to=["tensorboard"], ... load_best_model_at_end=True, ... greater_is_better=False, ... label_names=["labels"], ... push_to_hub=True, ... ) ``` Instantiate the `Trainer` object and pass the model, dataset, and data collator to it. ```py >>> from transformers import Seq2SeqTrainer >>> trainer = Seq2SeqTrainer( ... args=training_args, ... model=model, ... train_dataset=dataset["train"], ... eval_dataset=dataset["test"], ... data_collator=data_collator, ... processing_class=processor, ... ) ``` And with that, you're ready to start training! Training will take several hours. Depending on your GPU, it is possible that you will encounter a CUDA "out-of-memory" error when you start training. In this case, you can reduce the `per_device_train_batch_size` incrementally by factors of 2 and increase `gradient_accumulation_steps` by 2x to compensate. ```py >>> trainer.train() ``` To be able to use your checkpoint with a pipeline, make sure to save the processor with the checkpoint: ```py >>> processor.save_pretrained("YOUR_ACCOUNT_NAME/speecht5_finetuned_voxpopuli_nl") ``` Push the final model to the 🤗 Hub: ```py >>> trainer.push_to_hub() ``` ## Inference ### Inference with a pipeline Great, now that you've fine-tuned a model, you can use it for inference! First, let's see how you can use it with a corresponding pipeline. Let's create a `"text-to-speech"` pipeline with your checkpoint: ```py >>> from transformers import pipeline >>> pipe = pipeline("text-to-speech", model="YOUR_ACCOUNT_NAME/speecht5_finetuned_voxpopuli_nl") ``` Pick a piece of text in Dutch you'd like narrated, e.g.: ```py >>> text = "hallo allemaal, ik praat nederlands. groetjes aan iedereen!" ``` To use SpeechT5 with the pipeline, you'll need a speaker embedding. 
Let's get it from an example in the test dataset:

```py
>>> example = dataset["test"][304]
>>> speaker_embeddings = torch.tensor(example["speaker_embeddings"]).unsqueeze(0)
```

Now you can pass the text and speaker embeddings to the pipeline, and it will take care of the rest:

```py
>>> forward_params = {"speaker_embeddings": speaker_embeddings}
>>> output = pipe(text, forward_params=forward_params)
>>> output
{'audio': array([-6.82714235e-05, -4.26525949e-04,  1.06134125e-04, ...,
        -1.22392643e-03, -7.76011671e-04,  3.29112721e-04], dtype=float32),
 'sampling_rate': 16000}
```

You can then listen to the result:

```py
>>> from IPython.display import Audio
>>> Audio(output['audio'], rate=output['sampling_rate'])
```

### Run inference manually

You can achieve the same inference results without using the pipeline; however, more steps will be required.

Load the model from the 🤗 Hub:

```py
>>> model = SpeechT5ForTextToSpeech.from_pretrained("YOUR_ACCOUNT/speecht5_finetuned_voxpopuli_nl")
```

Pick an example from the test dataset to obtain a speaker embedding.

```py
>>> example = dataset["test"][304]
>>> speaker_embeddings = torch.tensor(example["speaker_embeddings"]).unsqueeze(0)
```

Define the input text and tokenize it.

```py
>>> text = "hallo allemaal, ik praat nederlands. groetjes aan iedereen!"
>>> inputs = processor(text=text, return_tensors="pt")
```

Create a spectrogram with your model:

```py
>>> spectrogram = model.generate_speech(inputs["input_ids"], speaker_embeddings)
```

Visualize the spectrogram, if you'd like to:

```py
>>> plt.figure()
>>> plt.imshow(spectrogram.T)
>>> plt.show()
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/tts_logmelspectrogram_2.png" alt="Generated log-mel spectrogram"/>
</div>

Finally, use the vocoder to turn the spectrogram into sound. SpeechT5 generates a spectrogram rather than a waveform, so load the HiFi-GAN vocoder that SpeechT5 is typically paired with and run the spectrogram through it.

```py
>>> from transformers import SpeechT5HifiGan

>>> # load the HiFi-GAN vocoder used with SpeechT5
>>> vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")

>>> with torch.no_grad():
...     speech = vocoder(spectrogram)

>>> from IPython.display import Audio
>>> Audio(speech.numpy(), rate=16000)
```

In our experience, obtaining satisfactory results from this model can be challenging. The quality of the speaker embeddings appears to be a significant factor. Since SpeechT5 was pre-trained with English x-vectors, it performs best when using English speaker embeddings. If the synthesized speech sounds poor, try using a different speaker embedding.

Increasing the training duration is also likely to enhance the quality of the results. Even so, the speech is clearly Dutch rather than English, and it does capture the voice characteristics of the speaker (compare to the original audio in the example).
Another thing to experiment with is the model's configuration. For example, try using `config.reduction_factor = 1` to see if this improves the results.

Finally, it is essential to consider the ethical implications. Although TTS technology has numerous useful applications, it may also be used for malicious purposes, such as impersonating someone's voice without their knowledge or consent. Please use TTS judiciously and responsibly.
transformers/docs/source/en/tasks/text-to-speech.md/0
{ "file_path": "transformers/docs/source/en/tasks/text-to-speech.md", "repo_id": "transformers", "token_count": 7299 }
- sections: - local: index title: 🤗 Transformers - local: quicktour title: Visite rapide - local: installation title: Installation title: Démarrer - sections: - local: tutoriel_pipeline title: Pipelines pour l'inférence - local: autoclass_tutorial title: Chargement d'instances pré-entraînées avec une AutoClass - local: in_translation title: Préparation des données - local: in_translation title: Fine-tune un modèle pré-entraîné - local: run_scripts_fr title: Entraînement avec un script - local: in_translation title: Entraînement distribué avec 🤗 Accelerate - local: in_translation title: Chargement et entraînement des adaptateurs avec 🤗 PEFT - local: in_translation title: Partager un modèle - local: in_translation title: Agents - local: in_translation title: Génération avec LLMs title: Tutoriels - sections: - local: task_summary title: Ce que 🤗 Transformers peut faire - local: tasks_explained title: Comment 🤗 Transformers résout ces tâches title: Guides conceptuels
transformers/docs/source/fr/_toctree.yml/0
{ "file_path": "transformers/docs/source/fr/_toctree.yml", "repo_id": "transformers", "token_count": 391 }
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Allenamento distribuito con 🤗 Accelerate La parallelizzazione è emersa come strategia per allenare modelli sempre più grandi su hardware limitato e accelerarne la velocità di allenamento di diversi ordini di magnitudine. In Hugging Face, abbiamo creato la libreria [🤗 Accelerate](https://huggingface.co/docs/accelerate) per aiutarti ad allenare in modo semplice un modello 🤗 Transformers su qualsiasi tipo di configurazione distribuita, sia che si tratti di più GPU su una sola macchina o di più GPU su più macchine. In questo tutorial, imparerai come personalizzare il training loop nativo di PyTorch per consentire l'addestramento in un ambiente distribuito. ## Configurazione Inizia installando 🤗 Accelerate: ```bash pip install accelerate ``` Poi importa e crea un oggetto [`Accelerator`](https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator). `Accelerator` rileverà automaticamente il tuo setup distribuito e inizializzerà tutte le componenti necessarie per l'allenamento. Non dovrai allocare esplicitamente il tuo modello su un device. ```py >>> from accelerate import Accelerator >>> accelerator = Accelerator() ``` ## Preparati ad accelerare Il prossimo passo è quello di passare tutti gli oggetti rilevanti per l'allenamento al metodo [`prepare`](https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.prepare). Questo include i tuoi DataLoaders per l'allenamento e per la valutazione, un modello e un ottimizzatore: ```py >>> train_dataloader, eval_dataloader, model, optimizer = accelerator.prepare( ... train_dataloader, eval_dataloader, model, optimizer ... ) ``` ## Backward Infine, sostituisci il tipico metodo `loss.backward()` nel tuo loop di allenamento con il metodo [`backward`](https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.backward) di 🤗 Accelerate: ```py >>> for epoch in range(num_epochs): ... for batch in train_dataloader: ... outputs = model(**batch) ... loss = outputs.loss ... accelerator.backward(loss) ... optimizer.step() ... lr_scheduler.step() ... optimizer.zero_grad() ... progress_bar.update(1) ``` Come puoi vedere nel seguente codice, hai solo bisogno di aggiungere quattro righe in più di codice al tuo training loop per abilitare l'allenamento distribuito! 
```diff + from accelerate import Accelerator from transformers import AdamW, AutoModelForSequenceClassification, get_scheduler + accelerator = Accelerator() model = AutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=2) optimizer = AdamW(model.parameters(), lr=3e-5) - device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") - model.to(device) + train_dataloader, eval_dataloader, model, optimizer = accelerator.prepare( + train_dataloader, eval_dataloader, model, optimizer + ) num_epochs = 3 num_training_steps = num_epochs * len(train_dataloader) lr_scheduler = get_scheduler( "linear", optimizer=optimizer, num_warmup_steps=0, num_training_steps=num_training_steps ) progress_bar = tqdm(range(num_training_steps)) model.train() for epoch in range(num_epochs): for batch in train_dataloader: - batch = {k: v.to(device) for k, v in batch.items()} outputs = model(**batch) loss = outputs.loss - loss.backward() + accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() progress_bar.update(1) ``` ## Allenamento Una volta che hai aggiunto le righe di codice rilevanti, lancia il tuo allenamento in uno script o in un notebook come Colaboratory. ### Allenamento con uno script Se stai eseguendo il tuo allenamento da uno script, esegui il comando seguente per creare e salvare un file di configurazione: ```bash accelerate config ``` Poi lancia il tuo allenamento con: ```bash accelerate launch train.py ``` ### Allenamento con un notebook La libreria 🤗 Accelerate può anche essere utilizzata in un notebook se stai pianificando di utilizzare le TPU di Colaboratory. Inserisci tutto il codice legato all'allenamento in una funzione, e passala al `notebook_launcher`: ```py >>> from accelerate import notebook_launcher >>> notebook_launcher(training_function) ``` Per maggiori informazioni relative a 🤗 Accelerate e le sue numerose funzionalità, fai riferimento alla [documentazione](https://huggingface.co/docs/accelerate).
transformers/docs/source/it/accelerate.md/0
{ "file_path": "transformers/docs/source/it/accelerate.md", "repo_id": "transformers", "token_count": 1891 }
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Inferenza Efficiente su CPU Questa guida si concentra sull'inferenza di modelli di grandi dimensioni in modo efficiente sulla CPU. ## `BetterTransformer` per inferenza più rapida Abbiamo integrato di recente `BetterTransformer` per fare inferenza più rapidamente con modelli per testi, immagini e audio. Visualizza la documentazione sull'integrazione [qui](https://huggingface.co/docs/optimum/bettertransformer/overview) per maggiori dettagli. ## PyTorch JIT-mode (TorchScript) TorchScript è un modo di creare modelli serializzabili e ottimizzabili da codice PyTorch. Ogni programmma TorchScript può esere salvato da un processo Python e caricato in un processo dove non ci sono dipendenze Python. Comparandolo con l'eager mode di default, jit mode in PyTorch normalmente fornisce prestazioni migliori per l'inferenza del modello da parte di metodologie di ottimizzazione come la operator fusion. Per una prima introduzione a TorchScript, vedi la Introduction to [PyTorch TorchScript tutorial](https://pytorch.org/tutorials/beginner/Intro_to_TorchScript_tutorial.html#tracing-modules). ### IPEX Graph Optimization con JIT-mode Intel® Extension per PyTorch fornnisce ulteriori ottimizzazioni in jit mode per i modelli della serie Transformers. Consigliamo vivamente agli utenti di usufruire dei vantaggi di Intel® Extension per PyTorch con jit mode. Alcuni operator patterns usati fequentemente dai modelli Transformers models sono già supportati in Intel® Extension per PyTorch con jit mode fusions. Questi fusion patterns come Multi-head-attention fusion, Concat Linear, Linear+Add, Linear+Gelu, Add+LayerNorm fusion and etc. sono abilitati e hanno buone performance. I benefici della fusion è fornito agli utenti in modo trasparente. In base alle analisi, il ~70% dei problemi più popolari in NLP question-answering, text-classification, and token-classification possono avere benefici sulle performance grazie ai fusion patterns sia per Float32 precision che per BFloat16 Mixed precision. Vedi maggiori informazioni per [IPEX Graph Optimization](https://intel.github.io/intel-extension-for-pytorch/cpu/latest/tutorials/features/graph_optimization.html). #### Installazione di IPEX I rilasci di IPEX seguono PyTorch, verifica i vari approcci per [IPEX installation](https://intel.github.io/intel-extension-for-pytorch/). ### Utilizzo del JIT-mode Per abilitare JIT-mode in Trainer per evaluation e prediction, devi aggiungere `jit_mode_eval` negli argomenti di Trainer. <Tip warning={true}> per PyTorch >= 1.14.0. JIT-mode potrebe giovare a qualsiasi modello di prediction e evaluaion visto che il dict input è supportato in jit.trace per PyTorch < 1.14.0. JIT-mode potrebbe giovare ai modelli il cui ordine dei parametri corrisponde all'ordine delle tuple in ingresso in jit.trace, come i modelli per question-answering. 
Nel caso in cui l'ordine dei parametri seguenti non corrisponda all'ordine delle tuple in ingresso in jit.trace, come nei modelli di text-classification, jit.trace fallirà e lo cattureremo con una eccezione al fine di renderlo un fallback. Il logging è usato per notificare gli utenti. </Tip> Trovi un esempo con caso d'uso in [Transformers question-answering](https://github.com/huggingface/transformers/tree/main/examples/pytorch/question-answering) - Inference using jit mode on CPU: <pre>python run_qa.py \ --model_name_or_path csarron/bert-base-uncased-squad-v1 \ --dataset_name squad \ --do_eval \ --max_seq_length 384 \ --doc_stride 128 \ --output_dir /tmp/ \ --no_cuda \ <b>--jit_mode_eval </b></pre> - Inference with IPEX using jit mode on CPU: <pre>python run_qa.py \ --model_name_or_path csarron/bert-base-uncased-squad-v1 \ --dataset_name squad \ --do_eval \ --max_seq_length 384 \ --doc_stride 128 \ --output_dir /tmp/ \ --no_cuda \ <b>--use_ipex \</b> <b>--jit_mode_eval</b></pre>
transformers/docs/source/it/perf_infer_cpu.md/0
{ "file_path": "transformers/docs/source/it/perf_infer_cpu.md", "repo_id": "transformers", "token_count": 1497 }
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 🤗 Accelerate を用いた分散学習 モデルが大きくなるにつれて、限られたハードウェアでより大きなモデルを訓練し、訓練速度を大幅に上昇させるための方法として並列処理が浮上してきました。1台のマシンに複数のGPUがあっても、複数のマシンにまたがる複数のGPUがあっても、あらゆるタイプの分散処理セットアップ上でユーザーが簡単に 🤗 Transformers モデルを訓練できるように、 Hugging Face では [🤗 Accelerate](https://huggingface.co/docs/accelerate) ライブラリを作成しました。このチュートリアルでは、PyTorch の訓練ループをカスタマイズして、分散処理環境での訓練を可能にする方法について学びます。 ## セットアップ はじめに 🤗 Accelerate をインストールしましょう: ```bash pip install accelerate ``` そしたらインポートして [`~accelerate.Accelerator`] オブジェクトを作成しましょう。[`~accelerate.Accelerator`] は分散処理セットアップを自動的に検出し、訓練のために必要な全てのコンポーネントを初期化します。モデルをデバイスに明示的に配置する必要はありません。 ```py >>> from accelerate import Accelerator >>> accelerator = Accelerator() ``` ## Accelerate する準備をしましょう 次に、関連する全ての訓練オブジェクトを [`~accelerate.Accelerator.prepare`] メソッドに渡します。これには、訓練と評価それぞれのDataloader、モデル、optimizer が含まれます: ```py >>> train_dataloader, eval_dataloader, model, optimizer = accelerator.prepare( ... train_dataloader, eval_dataloader, model, optimizer ... ) ``` ## Backward 最後に訓練ループ内の `loss.backward()` を 🤗 Accelerate の [`~accelerate.Accelerator.backward`] メソッドで置き換えます: ```py >>> for epoch in range(num_epochs): ... for batch in train_dataloader: ... outputs = model(**batch) ... loss = outputs.loss ... accelerator.backward(loss) ... optimizer.step() ... lr_scheduler.step() ... optimizer.zero_grad() ... progress_bar.update(1) ``` 以下のコードで確認できる通り、訓練ループに4行のコードを追加するだけで分散学習が可能です! 
```diff + from accelerate import Accelerator from transformers import AdamW, AutoModelForSequenceClassification, get_scheduler + accelerator = Accelerator() model = AutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=2) optimizer = AdamW(model.parameters(), lr=3e-5) - device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") - model.to(device) + train_dataloader, eval_dataloader, model, optimizer = accelerator.prepare( + train_dataloader, eval_dataloader, model, optimizer + ) num_epochs = 3 num_training_steps = num_epochs * len(train_dataloader) lr_scheduler = get_scheduler( "linear", optimizer=optimizer, num_warmup_steps=0, num_training_steps=num_training_steps ) progress_bar = tqdm(range(num_training_steps)) model.train() for epoch in range(num_epochs): for batch in train_dataloader: - batch = {k: v.to(device) for k, v in batch.items()} outputs = model(**batch) loss = outputs.loss - loss.backward() + accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() progress_bar.update(1) ``` ## 訓練する 関連するコードを追加したら、スクリプトまたは Colaboratory などのノートブックで訓練を開始します。 ### スクリプトで訓練する スクリプトから訓練をしている場合は、設定ファイルを作成・保存するために以下のコマンドを実行してください: ```bash accelerate config ``` そして次のようにして訓練を開始します: ```bash accelerate launch train.py ``` ### ノートブックで訓練する Colaboratory の TPU の利用をお考えの場合、🤗 Accelerate はノートブック上で実行することもできます。訓練に必要な全てのコードを関数に含め、[`~accelerate.notebook_launcher`] に渡してください: ```py >>> from accelerate import notebook_launcher >>> notebook_launcher(training_function) ``` 🤗 Accelerate と豊富な機能についてもっと知りたい方は[ドキュメント](https://huggingface.co/docs/accelerate)を参照してください。
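なお、分散環境で訓練したモデルを保存する際は、保存前にプロセス間の同期を取り、ラップを解除したモデルをメインプロセスだけで保存するのが一般的です。以下は本文には含まれていない参考用のスケッチで、保存先のパス名は一例です:

```py
>>> # 全プロセスの同期を待ってから、Accelerate のラップを解除したモデルを取り出します
>>> accelerator.wait_for_everyone()
>>> unwrapped_model = accelerator.unwrap_model(model)

>>> # メインプロセスのみで保存し、保存処理には accelerator.save を使います
>>> unwrapped_model.save_pretrained(
...     "path/to/save/folder",
...     is_main_process=accelerator.is_main_process,
...     save_function=accelerator.save,
... )
```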
transformers/docs/source/ja/accelerate.md/0
{ "file_path": "transformers/docs/source/ja/accelerate.md", "repo_id": "transformers", "token_count": 2185 }
<!--- Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # インストール 使用しているDeep Learningライブラリに対して、🤗 Transformersをインストールしてキャッシュを設定、そしてオプションでオフラインで実行できるように 🤗 Transformersを設定します。 🤗 TransformersはPython 3.6+, PyTorch 1.1.0+, TensorFlow 2.0+, Flaxで動作確認しています。 使用しているDeep Learningライブラリに合わせて、以下のインストール方法に従ってください: * [PyTorch](https://pytorch.org/get-started/locally/)のインストール手順。 * [TensorFlow 2.0](https://www.tensorflow.org/install/pip)のインストール手順。 * [Flax](https://flax.readthedocs.io/en/latest/)のインストール手順。 ## pipでのインストール 🤗 Transformersを[仮想環境](https://docs.python.org/3/library/venv.html)にインストールする必要があります。 もし、Pythonの仮想環境に馴染みがない場合は、この[ガイド](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/)をご覧ください。仮想環境によって異なるプロジェクトの管理がより簡単になり、依存関係間の互換性の問題を回避できます。 まず、プロジェクトディレクトリに仮想環境を作成することから始めましょう: ```bash python -m venv .env ``` 仮想環境を起動しましょう。LinuxとMacOsの場合は以下のコマンドで起動します: ```bash source .env/bin/activate ``` Windowsで仮想環境を起動します ```bash .env/Scripts/activate ``` これで、次のコマンドで🤗 Transformersをインストールする準備が整いました: ```bash pip install transformers ``` CPU対応のみ必要な場合、🤗 TransformersとDeep Learningライブラリを1行でインストールできるようになっていて便利です。例えば、🤗 TransformersとPyTorchを以下のように一緒にインストールできます: ```bash pip install transformers[torch] ``` 🤗 TransformersとTensorFlow 2.0: ```bash pip install transformers[tf-cpu] ``` 🤗 TransformersとFlax: ```bash pip install transformers[flax] ``` 最後に、以下のコマンドを実行することで🤗 Transformersが正しくインストールされているかを確認します。学習済みモデルがダウンロードされます: ```bash python -c "from transformers import pipeline; print(pipeline('sentiment-analysis')('we love you'))" ``` その後、ラベルとスコアが出力されます: ```bash [{'label': 'POSITIVE', 'score': 0.9998704791069031}] ``` ## ソースからのインストール 以下のコマンドでソースから🤗 Transformersをインストールします: ```bash pip install git+https://github.com/huggingface/transformers ``` このコマンドは最新の安定版ではなく、開発における最新の`main`バージョンをインストールします。`main`バージョンは最新の開発状況に対応するのに便利です。例えば、最後の公式リリース以降にバグが修正されたが、新しいリリースがまだ展開されていない場合などです。しかし、これは`main`バージョンが常に安定しているとは限らないことを意味します。私たちは`main`バージョンの運用を維持するよう努め、ほとんどの問題は通常、数時間から1日以内に解決されます。もし問題に遭遇した場合は、より早く修正できるように[Issue](https://github.com/huggingface/transformers/issues)を作成してください! 以下のコマンドを実行して、🤗 Transformersが正しくインストールされているかどうかを確認します: ```bash python -c "from transformers import pipeline; print(pipeline('sentiment-analysis')('I love you'))" ``` ## 編集可能なインストール 必要に応じて、編集可能なインストールをします: * ソースコードの`main`バージョンを使います。 * 🤗 Transformersにコントリビュートし、コードの変更をテストする必要があります。 以下のコマンドでレポジトリをクローンして、🤗 Transformersをインストールします: ```bash git clone https://github.com/huggingface/transformers.git cd transformers pip install -e . ``` 上記のコマンドは、レポジトリをクローンしたフォルダとPythonのライブラリをパスをリンクします。Pythonは通常のライブラリパスに加えて、あなたがクローンしたフォルダの中も見るようになります。例えば、Pythonパッケージが通常、`~/anaconda3/envs/main/lib/python3.7/site-packages/`にインストールされている場合、Pythonはクローンしたフォルダも検索するようになります: `~/transformers/`. 
<Tip warning={true}> ライブラリーを使い続けたい場合は、transformersフォルダーを保持しつづける必要があります。 </Tip> これで、次のコマンドで簡単にクローンを🤗 Transformersの最新版に更新できます: ```bash cd ~/transformers/ git pull ``` Python環境は次回の実行時に🤗 Transformersの`main`バージョンを見つけるようになります。 ## condaでのインストール `conda-forge`のcondaチャンネルからインストールします: ```bash conda install conda-forge::transformers ``` ## キャッシュの設定 学習済みモデルはダウンロードされ、ローカルにキャッシュされます: `~/.cache/huggingface/hub`. これはシェル環境変数`TRANSFORMERS_CACHE`で指定されるデフォルトのディレクトリです。Windowsでは、デフォルトのディレクトリは`C:\Users\username\.cache\huggingface\hub`になっています。異なるキャッシュディレクトリを指定するために、以下のシェル環境変数を変更することが可能です。優先度は以下の順番に対応します: 1. シェル環境変数 (デフォルト): `HF_HUB_CACHE` または `TRANSFORMERS_CACHE`. 2. シェル環境変数: `HF_HOME`. 3. シェル環境変数: `XDG_CACHE_HOME` + `/huggingface`. <Tip> もし、以前のバージョンのライブラリを使用していた人で、`PYTORCH_TRANSFORMERS_CACHE`または`PYTORCH_PRETRAINED_BERT_CACHE`を設定していた場合、シェル環境変数`TRANSFORMERS_CACHE`を指定しない限り🤗 Transformersはこれらのシェル環境変数を使用します。 </Tip> ## オフラインモード 🤗 Transformersはローカルファイルのみを使用することでファイアウォールやオフラインの環境でも動作させることができます。この動作を有効にするためには、環境変数`HF_HUB_OFFLINE=1`を設定します。 <Tip> 環境変数`HF_DATASETS_OFFLINE=1`を設定し、オフライントレーニングワークフローに[🤗 Datasets](https://huggingface.co/docs/datasets/)を追加します。 </Tip> 例えば、外部インスタンスに対してファイアウォールで保護された通常のネットワーク上でプログラムを実行する場合、通常以下のようなコマンドで実行することになります: ```bash python examples/pytorch/translation/run_translation.py --model_name_or_path google-t5/t5-small --dataset_name wmt16 --dataset_config ro-en ... ``` オフラインインスタンスでこの同じプログラムを実行します: ```bash HF_DATASETS_OFFLINE=1 HF_HUB_OFFLINE=1 \ python examples/pytorch/translation/run_translation.py --model_name_or_path google-t5/t5-small --dataset_name wmt16 --dataset_config ro-en ... ``` このスクリプトは、ローカルファイルのみを検索することが分かっているので、ハングアップしたりタイムアウトを待ったりすることなく実行されるはずです。 ### オフラインで使用するためにモデルやトークナイザーを取得する オフラインで🤗 Transformersを使用するもう1つの方法は、前もってファイルをダウンロードしておき、オフラインで使用する必要があるときにそのローカルパスを指定することです。これには3つの方法があります: * [Model Hub](https://huggingface.co/models)のユーザーインターフェース上から↓アイコンをクリックしてファイルをダウンロードする方法。 ![download-icon](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/download-icon.png) * [`PreTrainedModel.from_pretrained`]および[`PreTrainedModel.save_pretrained`]のワークフローを使用する方法: 1. [`PreTrainedModel.from_pretrained`]で前もってファイルをダウンロードします: ```py >>> from transformers import AutoTokenizer, AutoModelForSeq2SeqLM >>> tokenizer = AutoTokenizer.from_pretrained("bigscience/T0_3B") >>> model = AutoModelForSeq2SeqLM.from_pretrained("bigscience/T0_3B") ``` 2. [`PreTrainedModel.save_pretrained`]で指定されたディレクトリにファイルを保存しておきます: ```py >>> tokenizer.save_pretrained("./your/path/bigscience_t0") >>> model.save_pretrained("./your/path/bigscience_t0") ``` 3. オフラインにある時、[`PreTrainedModel.from_pretrained`]に指定したディレクトリからファイルをリロードします: ```py >>> tokenizer = AutoTokenizer.from_pretrained("./your/path/bigscience_t0") >>> model = AutoModel.from_pretrained("./your/path/bigscience_t0") ``` * プログラム的に[huggingface_hub](https://github.com/huggingface/huggingface_hub/tree/main/src/huggingface_hub)ライブラリを用いて、ファイルをダウンロードする方法: 1. 仮想環境に`huggingface_hub`ライブラリをインストールします: ```bash python -m pip install huggingface_hub ``` 2. 
指定のパスにファイルをダウンロードするために、[`hf_hub_download`](https://huggingface.co/docs/hub/adding-a-library#download-files-from-the-hub)関数を使用します。例えば、以下のコマンドで、[T0](https://huggingface.co/bigscience/T0_3B)モデルの`config.json`ファイルを指定のパスにダウンロードできます: ```py >>> from huggingface_hub import hf_hub_download >>> hf_hub_download(repo_id="bigscience/T0_3B", filename="config.json", cache_dir="./your/path/bigscience_t0") ``` ファイルがダウンロードされ、ローカルにキャッシュされたら、そのローカルパスを指定してファイルをロードして使用します: ```py >>> from transformers import AutoConfig >>> config = AutoConfig.from_pretrained("./your/path/bigscience_t0/config.json") ``` <Tip> Hubに保存されているファイルをダウンロードする方法の詳細については、[How to download files from the Hub](https://huggingface.co/docs/hub/how-to-downstream)セクションを参照してください。 </Tip>
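補足として、ファイルを1つずつではなくリポジトリ全体を事前にダウンロードしておきたい場合は、`huggingface_hub`の`snapshot_download`関数を使う方法もあります。以下は本文には含まれていない参考用の例で、保存先のパスは任意です:

```py
>>> from huggingface_hub import snapshot_download

>>> # リポジトリ全体を指定したキャッシュディレクトリにダウンロードします
>>> snapshot_download(repo_id="bigscience/T0_3B", cache_dir="./your/path/bigscience_t0")
```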
transformers/docs/source/ja/installation.md/0
{ "file_path": "transformers/docs/source/ja/installation.md", "repo_id": "transformers", "token_count": 4805 }
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # AltCLIP ## 概要 AltCLIPモデルは、「[AltCLIP: Altering the Language Encoder in CLIP for Extended Language Capabilities](https://arxiv.org/abs/2211.06679v2)」という論文でZhongzhi Chen、Guang Liu、Bo-Wen Zhang、Fulong Ye、Qinghong Yang、Ledell Wuによって提案されました。AltCLIP(CLIPの言語エンコーダーの代替)は、様々な画像-テキストペアおよびテキスト-テキストペアでトレーニングされたニューラルネットワークです。CLIPのテキストエンコーダーを事前学習済みの多言語テキストエンコーダーXLM-Rに置き換えることで、ほぼ全てのタスクでCLIPに非常に近い性能を得られ、オリジナルのCLIPの能力を多言語理解などに拡張しました。 論文の要旨は以下の通りです: *この研究では、強力なバイリンガルマルチモーダル表現モデルを訓練するための概念的に単純で効果的な方法を提案します。OpenAIによってリリースされたマルチモーダル表現モデルCLIPから開始し、そのテキストエンコーダを事前学習済みの多言語テキストエンコーダXLM-Rに交換し、教師学習と対照学習からなる2段階のトレーニングスキーマを用いて言語と画像の表現を整合させました。幅広いタスクの評価を通じて、我々の方法を検証します。ImageNet-CN、Flicker30k-CN、COCO-CNを含む多くのタスクで新たな最先端の性能を達成しました。さらに、ほぼすべてのタスクでCLIPに非常に近い性能を得ており、これはCLIPのテキストエンコーダを変更するだけで、多言語理解などの拡張を実現できることを示唆しています。* このモデルは[jongjyh](https://huggingface.co/jongjyh)により提供されました。 ## 使用上のヒントと使用例 AltCLIPの使用方法はCLIPに非常に似ています。CLIPとの違いはテキストエンコーダーにあります。私たちはカジュアルアテンションではなく双方向アテンションを使用し、XLM-Rの[CLS]トークンをテキスト埋め込みを表すものとして取ることに留意してください。 AltCLIPはマルチモーダルな視覚言語モデルです。これは画像とテキストの類似度や、ゼロショット画像分類に使用できます。AltCLIPはViTのようなTransformerを使用して視覚的特徴を、双方向言語モデルを使用してテキスト特徴を取得します。テキストと視覚の両方の特徴は、同一の次元を持つ潜在空間に射影されます。射影された画像とテキスト特徴間のドット積が類似度スコアとして使用されます。 Transformerエンコーダーに画像を与えるには、各画像を固定サイズの重複しないパッチの系列に分割し、それらを線形に埋め込みます。画像全体を表現するための[CLS]トークンが追加されます。著者は絶対位置埋め込みも追加し、結果として得られるベクトルの系列を標準的なTransformerエンコーダーに供給します。[`CLIPImageProcessor`]を使用して、モデルのために画像のサイズ変更(または拡大縮小)と正規化を行うことができます。 [`AltCLIPProcessor`]は、テキストのエンコードと画像の前処理を両方行うために、[`CLIPImageProcessor`]と[`XLMRobertaTokenizer`]を単一のインスタンスにラップします。以下の例は、[`AltCLIPProcessor`]と[`AltCLIPModel`]を使用して画像-テキスト類似スコアを取得する方法を示しています。 ```python >>> from PIL import Image >>> import requests >>> from transformers import AltCLIPModel, AltCLIPProcessor >>> model = AltCLIPModel.from_pretrained("BAAI/AltCLIP") >>> processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True) >>> outputs = model(**inputs) >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities ``` <Tip> このモデルは`CLIPModel`をベースにしており、オリジナルの[CLIP](clip)と同じように使用してください。 </Tip> ## AltCLIPConfig [[autodoc]] AltCLIPConfig - from_text_vision_configs ## AltCLIPTextConfig [[autodoc]] AltCLIPTextConfig ## AltCLIPVisionConfig [[autodoc]] AltCLIPVisionConfig ## AltCLIPProcessor [[autodoc]] AltCLIPProcessor ## AltCLIPModel [[autodoc]] AltCLIPModel - forward - get_text_features - get_image_features ## AltCLIPTextModel [[autodoc]] AltCLIPTextModel - forward ## AltCLIPVisionModel [[autodoc]] 
AltCLIPVisionModel - forward
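なお、上記の `get_text_features` と `get_image_features` を使うと、テキストと画像の埋め込みを個別に取得して類似度を自分で計算することもできます。以下は本文には含まれていない参考用のスケッチです:

```python
>>> import torch
>>> from PIL import Image
>>> import requests
>>> from transformers import AltCLIPModel, AltCLIPProcessor

>>> model = AltCLIPModel.from_pretrained("BAAI/AltCLIP")
>>> processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")

>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)

>>> # テキストと画像をそれぞれ同じ次元の潜在空間に射影します
>>> text_inputs = processor(text=["a photo of a cat", "a photo of a dog"], return_tensors="pt", padding=True)
>>> image_inputs = processor(images=image, return_tensors="pt")

>>> with torch.no_grad():
...     text_features = model.get_text_features(**text_inputs)
...     image_features = model.get_image_features(**image_inputs)

>>> # 正規化した特徴量のドット積がコサイン類似度になります
>>> text_features = text_features / text_features.norm(dim=-1, keepdim=True)
>>> image_features = image_features / image_features.norm(dim=-1, keepdim=True)
>>> similarity = image_features @ text_features.T
```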
transformers/docs/source/ja/model_doc/altclip.md/0
{ "file_path": "transformers/docs/source/ja/model_doc/altclip.md", "repo_id": "transformers", "token_count": 2400 }
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Big Transfer (BiT) ## Overview BiT モデルは、Alexander Kolesnikov、Lucas Beyer、Xiaohua Zhai、Joan Puigcerver、Jessica Yung、Sylvain Gelly、Neil Houlsby によって [Big Transfer (BiT): General Visual Representation Learning](https://arxiv.org/abs/1912.11370) で提案されました。 BiT は、[ResNet](resnet) のようなアーキテクチャ (具体的には ResNetv2) の事前トレーニングをスケールアップするための簡単なレシピです。この方法により、転移学習が大幅に改善されます。 論文の要約は次のとおりです。 *事前トレーニングされた表現の転送により、サンプル効率が向上し、視覚用のディープ ニューラル ネットワークをトレーニングする際のハイパーパラメーター調整が簡素化されます。大規模な教師ありデータセットでの事前トレーニングと、ターゲット タスクでのモデルの微調整のパラダイムを再検討します。私たちは事前トレーニングをスケールアップし、Big Transfer (BiT) と呼ぶシンプルなレシピを提案します。いくつかの慎重に選択されたコンポーネントを組み合わせ、シンプルなヒューリスティックを使用して転送することにより、20 を超えるデータセットで優れたパフォーマンスを実現します。 BiT は、クラスごとに 1 つのサンプルから合計 100 万のサンプルまで、驚くほど広範囲のデータ領域にわたって良好にパフォーマンスを発揮します。 BiT は、ILSVRC-2012 で 87.5%、CIFAR-10 で 99.4%、19 タスクの Visual Task Adaptation Benchmark (VTAB) で 76.3% のトップ 1 精度を達成しました。小規模なデータセットでは、BiT は ILSVRC-2012 (クラスあたり 10 例) で 76.8%、CIFAR-10 (クラスあたり 10 例) で 97.0% を達成しました。高い転移性能を実現する主要な構成要素を詳細に分析します。* ## Usage tips - BiT モデルは、アーキテクチャの点で ResNetv2 と同等ですが、次の点が異なります: 1) すべてのバッチ正規化層が [グループ正規化](https://arxiv.org/abs/1803.08494) に置き換えられます。 2) [重みの標準化](https://arxiv.org/abs/1903.10520) は畳み込み層に使用されます。著者らは、両方の組み合わせが大きなバッチサイズでのトレーニングに役立ち、転移学習に重要な効果があることを示しています。 このモデルは、[nielsr](https://huggingface.co/nielsr) によって提供されました。 元のコードは [こちら](https://github.com/google-research/big_transfer) にあります。 ## Resources BiT を始めるのに役立つ公式 Hugging Face およびコミュニティ (🌎 で示されている) リソースのリスト。 <PipelineTag pipeline="image-classification"/> - [`BitForImageClassification`] は、この [サンプル スクリプト](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-classification) および [ノートブック](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb) でサポートされています。 - 参照: [画像分類タスク ガイド](../tasks/image_classification) ここに含めるリソースの送信に興味がある場合は、お気軽にプル リクエストを開いてください。審査させていただきます。リソースは、既存のリソースを複製するのではなく、何か新しいものを示すことが理想的です。 ## BitConfig [[autodoc]] BitConfig ## BitImageProcessor [[autodoc]] BitImageProcessor - preprocess ## BitModel [[autodoc]] BitModel - forward ## BitForImageClassification [[autodoc]] BitForImageClassification - forward
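参考までに、画像分類パイプラインで BiT を試す最小限の例を以下に示します。これは本文には含まれていない参考用のスケッチで、Hub 上の `google/bit-50` チェックポイントが利用できることを前提としています:

```python
>>> from transformers import pipeline

>>> # BiT のチェックポイントを指定した画像分類パイプライン(チェックポイント名は一例です)
>>> classifier = pipeline("image-classification", model="google/bit-50")
>>> classifier("http://images.cocodataset.org/val2017/000000039769.jpg")
```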
transformers/docs/source/ja/model_doc/bit.md/0
{ "file_path": "transformers/docs/source/ja/model_doc/bit.md", "repo_id": "transformers", "token_count": 1858 }
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # CLVP ## Overview CLVP (Contrastive Language-Voice Pretrained Transformer) モデルは、James Betker によって [Better speech synthesis through scaling](https://arxiv.org/abs/2305.07243) で提案されました。 論文の要約は次のとおりです。 *近年、画像生成の分野は自己回帰変換器と DDPM の応用によって革命を起こしています。これらのアプローチは、画像生成のプロセスを段階的な確率的プロセスとしてモデル化し、大量のコンピューティングとデータを活用して画像の分布を学習します。パフォーマンスを向上させるこの方法論は、画像に限定される必要はありません。この論文では、画像生成ドメインの進歩を音声合成に適用する方法について説明します。その結果、表現力豊かなマルチ音声テキスト読み上げシステムである TorToise が誕生しました。 このモデルは [Susnato Dhar](https://huggingface.co/susnato) によって提供されました。 元のコードは [ここ](https://github.com/neonbjb/tortoise-tts) にあります。 ## Usage tips 1. CLVP は Tortoise TTS モデルの不可欠な部分です。 2. CLVP を使用して、生成されたさまざまな音声候補を提供されたテキストと比較することができ、最良の音声トークンが拡散モデルに転送されます。 3. Tortoise の使用には、[`ClvpModelForConditionalGeneration.generate()`] メソッドの使用を強くお勧めします。 4. 16 kHz を期待する他のオーディオ モデルとは対照的に、CLVP モデルはオーディオが 22.05 kHz でサンプリングされることを期待していることに注意してください。 ## Brief Explanation: - [`ClvpTokenizer`] はテキスト入力をトークン化し、[`ClvpFeatureExtractor`] は目的のオーディオからログ メル スペクトログラムを抽出します。 - [`ClvpConditioningEncoder`] は、これらのテキスト トークンとオーディオ表現を取得し、テキストとオーディオに基づいて条件付けされた埋め込みに変換します。 - [`ClvpForCausalLM`] は、これらの埋め込みを使用して複数の音声候補を生成します。 - 各音声候補は音声エンコーダ ([`ClvpEncoder`]) を通過してベクトル表現に変換され、テキスト エンコーダ ([`ClvpEncoder`]) はテキスト トークンを同じ潜在空間に変換します。 - 最後に、各音声ベクトルをテキスト ベクトルと比較して、どの音声ベクトルがテキスト ベクトルに最も類似しているかを確認します。 - [`ClvpModelForConditionalGeneration.generate()`] は、上記のすべてのロジックを 1 つのメソッドに圧縮します。 例 : ```python >>> import datasets >>> from transformers import ClvpProcessor, ClvpModelForConditionalGeneration >>> # Define the Text and Load the Audio (We are taking an audio example from HuggingFace Hub using `datasets` library). >>> text = "This is an example text." >>> ds = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> ds = ds.cast_column("audio", datasets.Audio(sampling_rate=22050)) >>> sample = ds[0]["audio"] >>> # Define processor and model. >>> processor = ClvpProcessor.from_pretrained("susnato/clvp_dev") >>> model = ClvpModelForConditionalGeneration.from_pretrained("susnato/clvp_dev") >>> # Generate processor output and model output. 
>>> processor_output = processor(raw_speech=sample["array"], sampling_rate=sample["sampling_rate"], text=text, return_tensors="pt") >>> generated_output = model.generate(**processor_output) ``` ## ClvpConfig [[autodoc]] ClvpConfig - from_sub_model_configs ## ClvpEncoderConfig [[autodoc]] ClvpEncoderConfig ## ClvpDecoderConfig [[autodoc]] ClvpDecoderConfig ## ClvpTokenizer [[autodoc]] ClvpTokenizer - save_vocabulary ## ClvpFeatureExtractor [[autodoc]] ClvpFeatureExtractor - __call__ ## ClvpProcessor [[autodoc]] ClvpProcessor - __call__ - decode - batch_decode ## ClvpModelForConditionalGeneration [[autodoc]] ClvpModelForConditionalGeneration - forward - generate - get_text_features - get_speech_features ## ClvpForCausalLM [[autodoc]] ClvpForCausalLM ## ClvpModel [[autodoc]] ClvpModel ## ClvpEncoder [[autodoc]] ClvpEncoder ## ClvpDecoder [[autodoc]] ClvpDecoder
transformers/docs/source/ja/model_doc/clvp.md/0
{ "file_path": "transformers/docs/source/ja/model_doc/clvp.md", "repo_id": "transformers", "token_count": 2038 }
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # DeiT ## Overview DeiT モデルは、Hugo Touvron、Matthieu Cord、Matthijs Douze、Francisco Massa、Alexandre Sablayrolles, Hervé Jégou.によって [Training data-efficient image Transformers & distillation through attention](https://arxiv.org/abs/2012.12877) で提案されました。 サブレイロール、エルヴェ・ジェグー。 [Dosovitskiy et al., 2020](https://arxiv.org/abs/2010.11929) で紹介された [Vision Transformer (ViT)](vit) は、既存の畳み込みニューラルと同等、またはそれを上回るパフォーマンスを発揮できることを示しました。 Transformer エンコーダ (BERT のような) を使用したネットワーク。ただし、その論文で紹介された ViT モデルには、次のトレーニングが必要でした。 外部データを使用して、数週間にわたる高価なインフラストラクチャ。 DeiT (データ効率の高い画像変換器) はさらに優れています 画像分類用に効率的にトレーニングされたトランスフォーマーにより、必要なデータとコンピューティング リソースがはるかに少なくなります。 オリジナルの ViT モデルとの比較。 論文の要約は次のとおりです。 *最近、純粋に注意に基づくニューラル ネットワークが、画像などの画像理解タスクに対処できることが示されました。 分類。ただし、これらのビジュアル トランスフォーマーは、 インフラストラクチャが高価であるため、その採用が制限されています。この作業では、コンボリューションフリーの競争力のあるゲームを作成します。 Imagenet のみでトレーニングしてトランスフォーマーを作成します。 1 台のコンピューターで 3 日以内にトレーニングを行います。私たちの基準となるビジョン トランス (86M パラメータ) は、外部なしで ImageNet 上で 83.1% (単一クロップ評価) のトップ 1 の精度を達成します。 データ。さらに重要なのは、トランスフォーマーに特有の教師と生徒の戦略を導入することです。蒸留に依存している 学生が注意を払って教師から学ぶことを保証するトークン。私たちはこのトークンベースに興味を示します 特に convnet を教師として使用する場合。これにより、convnet と競合する結果を報告できるようになります。 Imagenet (最大 85.2% の精度が得られます) と他のタスクに転送するときの両方で。私たちはコードを共有し、 モデル。* このモデルは、[nielsr](https://huggingface.co/nielsr) によって提供されました。このモデルの TensorFlow バージョンは、[amyeroberts](https://huggingface.co/amyeroberts) によって追加されました。 ## Usage tips - ViT と比較して、DeiT モデルはいわゆる蒸留トークンを使用して教師から効果的に学習します (これは、 DeiT 論文は、ResNet のようなモデルです)。蒸留トークンは、バックプロパゲーションを通じて、と対話することによって学習されます。 セルフアテンション層を介したクラス ([CLS]) とパッチ トークン。 - 抽出されたモデルを微調整するには 2 つの方法があります。(1) 上部に予測ヘッドを配置するだけの古典的な方法。 クラス トークンの最終的な非表示状態を抽出し、蒸留シグナルを使用しない、または (2) 両方の 予測ヘッドはクラス トークンの上と蒸留トークンの上にあります。その場合、[CLS] 予測は head は、head の予測とグラウンド トゥルース ラベル間の通常のクロスエントロピーを使用してトレーニングされます。 蒸留予測ヘッドは、硬蒸留 (予測と予測の間のクロスエントロピー) を使用してトレーニングされます。 蒸留ヘッドと教師が予測したラベル)。推論時に、平均予測を取得します。 最終的な予測として両頭の間で。 (2) は「蒸留による微調整」とも呼ばれます。 下流のデータセットですでに微調整されている教師。モデル的には (1) に相当します。 [`DeiTForImageClassification`] と (2) に対応します。 [`DeiTForImageClassificationWithTeacher`]。 - 著者らは (2) についてもソフト蒸留を試みたことに注意してください (この場合、蒸留予測ヘッドは 教師のソフトマックス出力に一致するように KL ダイバージェンスを使用してトレーニングしました)が、ハード蒸留が最良の結果をもたらしました。 - リリースされたすべてのチェックポイントは、ImageNet-1k のみで事前トレーニングおよび微調整されました。外部データは使用されませんでした。これは JFT-300M データセット/Imagenet-21k などの外部データを使用した元の ViT モデルとは対照的です。 事前トレーニング。 - DeiT の作者は、より効率的にトレーニングされた ViT モデルもリリースしました。これは、直接プラグインできます。 [`ViTModel`] または [`ViTForImageClassification`]。データなどのテクニック はるかに大規模なデータセットでのトレーニングをシミュレートするために、拡張、最適化、正則化が使用されました。 (ただし、事前トレーニングには ImageNet-1k のみを使用します)。 4 つのバリエーション (3 つの異なるサイズ) が利用可能です。 *facebook/deit-tiny-patch16-224*、*facebook/deit-small-patch16-224*、*facebook/deit-base-patch16-224* および *facebook/deit-base-patch16-384*。以下を行うには [`DeiTImageProcessor`] を使用する必要があることに注意してください。 モデル用の画像を準備します。 ## Resources DeiT を始めるのに役立つ公式 Hugging Face およびコミュニティ (🌎 で示されている) リソースのリスト。 
<PipelineTag pipeline="image-classification"/> - [`DeiTForImageClassification`] は、この [サンプル スクリプト](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-classification) および [ノートブック](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb)。 - 参照: [画像分類タスク ガイド](../tasks/image_classification) それに加えて: - [`DeiTForMaskedImageModeling`] は、この [サンプル スクリプト](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-pretraining) でサポートされています。 ここに含めるリソースの送信に興味がある場合は、お気軽にプル リクエストを開いてください。審査させていただきます。リソースは、既存のリソースを複製するのではなく、何か新しいものを示すことが理想的です。 ## DeiTConfig [[autodoc]] DeiTConfig ## DeiTFeatureExtractor [[autodoc]] DeiTFeatureExtractor - __call__ ## DeiTImageProcessor [[autodoc]] DeiTImageProcessor - preprocess ## DeiTImageProcessorFast [[autodoc]] DeiTImageProcessorFast - preprocess <frameworkcontent> <pt> ## DeiTModel [[autodoc]] DeiTModel - forward ## DeiTForMaskedImageModeling [[autodoc]] DeiTForMaskedImageModeling - forward ## DeiTForImageClassification [[autodoc]] DeiTForImageClassification - forward ## DeiTForImageClassificationWithTeacher [[autodoc]] DeiTForImageClassificationWithTeacher - forward </pt> <tf> ## TFDeiTModel [[autodoc]] TFDeiTModel - call ## TFDeiTForMaskedImageModeling [[autodoc]] TFDeiTForMaskedImageModeling - call ## TFDeiTForImageClassification [[autodoc]] TFDeiTForImageClassification - call ## TFDeiTForImageClassificationWithTeacher [[autodoc]] TFDeiTForImageClassificationWithTeacher - call </tf> </frameworkcontent>
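参考までに、蒸留済みチェックポイントで推論を行う最小限の例を以下に示します。これは本文には含まれていない参考用のスケッチで、Hub 上の `facebook/deit-base-distilled-patch16-224` チェックポイントを使うことを想定しています:

```python
>>> import torch
>>> import requests
>>> from PIL import Image
>>> from transformers import AutoImageProcessor, DeiTForImageClassificationWithTeacher

>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)

>>> image_processor = AutoImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
>>> model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")

>>> inputs = image_processor(images=image, return_tensors="pt")
>>> with torch.no_grad():
...     logits = model(**inputs).logits

>>> # クラストークンと蒸留トークンの予測ヘッドを平均したロジットから、最も確率の高いクラスを取得します
>>> predicted_class_idx = logits.argmax(-1).item()
>>> print(model.config.id2label[predicted_class_idx])
```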
transformers/docs/source/ja/model_doc/deit.md/0
{ "file_path": "transformers/docs/source/ja/model_doc/deit.md", "repo_id": "transformers", "token_count": 3713 }
<!-- Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ このファイルはMarkdown形式ですが、Hugging Faceのドキュメントビルダー向けに特定の構文を含んでいるため、 通常のMarkdownビューアーで正しく表示されないことに注意してください。 --> # Quick tour [[open-in-colab]] 🤗 Transformersを使い始めましょう! 開発者であろうと、日常的なユーザーであろうと、このクイックツアーは 初めて始めるのを支援し、[`pipeline`]を使った推論方法、[AutoClass](./model_doc/auto)で事前学習済みモデルとプリプロセッサをロードする方法、 そしてPyTorchまたはTensorFlowで素早くモデルをトレーニングする方法を示します。 初心者の場合、ここで紹介されたコンセプトの詳細な説明を提供する チュートリアルまたは[コース](https://huggingface.co/course/chapter1/1)を次に参照することをお勧めします。 始める前に、必要なライブラリがすべてインストールされていることを確認してください: ```bash !pip install transformers datasets evaluate accelerate ``` あなたはまた、好きな機械学習フレームワークをインストールする必要があります: <frameworkcontent> <pt> ```bash pip install torch ``` </pt> <tf> ```bash pip install tensorflow ``` </tf> </frameworkcontent> ## Pipeline <Youtube id="tiZFewofSLM"/> [`pipeline`] は、事前学習済みモデルを推論に最も簡単で高速な方法です。 [`pipeline`] を使用することで、さまざまなモダリティにわたる多くのタスクに対して即座に使用できます。 いくつかのタスクは以下の表に示されています: <Tip> 使用可能なタスクの完全な一覧については、[pipeline API リファレンス](./main_classes/pipelines)を確認してください。 </Tip> | **タスク** | **説明** | **モダリティ** | **パイプライン識別子** | |------------------------------|--------------------------------------------------------------------------------------------------------------|-----------------|-----------------------------------------------| | テキスト分類 | テキストのシーケンスにラベルを割り当てる | NLP | pipeline(task="sentiment-analysis") | | テキスト生成 | プロンプトを指定してテキストを生成する | NLP | pipeline(task="text-generation") | | 要約 | テキストまたはドキュメントの要約を生成する | NLP | pipeline(task="summarization") | | 画像分類 | 画像にラベルを割り当てる | コンピュータビジョン | pipeline(task="image-classification") | | 画像セグメンテーション | 画像の各個別のピクセルにラベルを割り当てる(セマンティック、パノプティック、およびインスタンスセグメンテーションをサポート) | コンピュータビジョン | pipeline(task="image-segmentation") | | オブジェクト検出 | 画像内のオブジェクトの境界ボックスとクラスを予測する | コンピュータビジョン | pipeline(task="object-detection") | | オーディオ分類 | オーディオデータにラベルを割り当てる | オーディオ | pipeline(task="audio-classification") | | 自動音声認識 | 音声をテキストに変換する | オーディオ | pipeline(task="automatic-speech-recognition") | | ビジュアルクエスチョン応答 | 画像と質問が与えられた場合に、画像に関する質問に回答する | マルチモーダル | pipeline(task="vqa") | | ドキュメントクエスチョン応答 | ドキュメントと質問が与えられた場合に、ドキュメントに関する質問に回答する | マルチモーダル | pipeline(task="document-question-answering") | | 画像キャプショニング | 与えられた画像にキャプションを生成する | マルチモーダル | pipeline(task="image-to-text") | まず、[`pipeline`] のインスタンスを作成し、使用したいタスクを指定します。 このガイドでは、センチメント分析のために [`pipeline`] を使用する例を示します: ```python >>> from transformers import pipeline >>> classifier = pipeline("sentiment-analysis") ``` [`pipeline`]は、感情分析のためのデフォルトの[事前学習済みモデル](https://huggingface.co/distilbert/distilbert-base-uncased-finetuned-sst-2-english)とトークナイザをダウンロードしてキャッシュし、使用できるようになります。 これで、`classifier`を対象のテキストに使用できます: ```python >>> classifier("私たちは🤗 Transformersライブラリをお見せできてとても嬉しいです。") [{'label': 'POSITIVE', 'score': 0.9998}] ``` 複数の入力がある場合は、[`pipeline`]に入力をリストとして渡して、辞書のリストを返します: ```py >>> results = classifier(["🤗 Transformersライブラリをご紹介できて非常に嬉しいです。", "嫌いにならないでほしいです。"]) >>> for result in results: ... 
print(f"label: {result['label']}, スコア: {round(result['score'], 4)}") label: POSITIVE, スコア: 0.9998 label: NEGATIVE, スコア: 0.5309 ``` [`pipeline`]は、任意のタスクに対してデータセット全体を繰り返し処理することもできます。この例では、自動音声認識をタスクとして選びましょう: ```python >>> import torch >>> from transformers import pipeline >>> speech_recognizer = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h") ``` オーディオデータセットをロードします(詳細については🤗 Datasets [クイックスタート](https://huggingface.co/docs/datasets/quickstart#audio)を参照してください)。 たとえば、[MInDS-14](https://huggingface.co/datasets/PolyAI/minds14)データセットをロードします: ```python >>> from datasets import load_dataset, Audio >>> dataset = load_dataset("PolyAI/minds14", name="en-US", split="train") # doctest: +IGNORE_RESULT ``` データセットのサンプリングレートが[`facebook/wav2vec2-base-960h`](https://huggingface.co/facebook/wav2vec2-base-960h)がトレーニングされたサンプリングレートと一致することを確認してください: ```py >>> dataset = dataset.cast_column("audio", Audio(sampling_rate=speech_recognizer.feature_extractor.sampling_rate)) ``` "audio"列を呼び出すと、オーディオファイルは自動的にロードされ、リサンプリングされます。最初の4つのサンプルから生の波形配列を抽出し、それをパイプラインにリストとして渡します。 ```py >>> result = speech_recognizer(dataset[:4]["audio"]) >>> print([d["text"] for d in result]) ['I WOULD LIKE TO SET UP A JOINT ACCOUNT WITH MY PARTNER HOW DO I PROCEED WITH DOING THAT', "FONDERING HOW I'D SET UP A JOIN TO HELL T WITH MY WIFE AND WHERE THE AP MIGHT BE", "I I'D LIKE TOY SET UP A JOINT ACCOUNT WITH MY PARTNER I'M NOT SEEING THE OPTION TO DO IT ON THE APSO I CALLED IN TO GET SOME HELP CAN I JUST DO IT OVER THE PHONE WITH YOU AND GIVE YOU THE INFORMATION OR SHOULD I DO IT IN THE AP AN I'M MISSING SOMETHING UQUETTE HAD PREFERRED TO JUST DO IT OVER THE PHONE OF POSSIBLE THINGS", 'HOW DO I FURN A JOINA COUT'] ``` 大規模なデータセットで、入力が大きい場合(音声や画像など)、すべての入力をメモリに読み込む代わりに、リストではなくジェネレータを渡すことがお勧めです。詳細については[パイプラインAPIリファレンス](./main_classes/pipelines)を参照してください。 ### Use another model and tokenizer in the pipeline [`pipeline`]は[Hub](https://huggingface.co/models)からの任意のモデルを収容でき、他のユースケースに[`pipeline`]を適応させることが容易です。たとえば、フランス語のテキストを処理できるモデルが必要な場合、Hubのタグを使用して適切なモデルをフィルタリングできます。トップのフィルタリングされた結果は、フランス語のテキストに使用できる感情分析用に調整された多言語の[BERTモデル](https://huggingface.co/nlptown/bert-base-multilingual-uncased-sentiment)を返します: ```py >>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment" ``` <frameworkcontent> <pt> [`AutoModelForSequenceClassification`]と[`AutoTokenizer`]を使用して事前学習済みモデルとそれに関連するトークナイザをロードします(次のセクションで`AutoClass`について詳しく説明します): ```python >>> from transformers import AutoTokenizer, AutoModelForSequenceClassification >>> model = AutoModelForSequenceClassification.from_pretrained(model_name) >>> tokenizer = AutoTokenizer.from_pretrained(model_name) ``` </pt> <tf> 以下のコードは、[`TFAutoModelForSequenceClassification`]および[`AutoTokenizer`]を使用して、事前学習済みモデルとその関連するトークナイザをロードする方法を示しています(`TFAutoClass`については次のセクションで詳しく説明します): ```python >>> from transformers import AutoTokenizer, TFAutoModelForSequenceClassification >>> model = TFAutoModelForSequenceClassification.from_pretrained(model_name) >>> tokenizer = AutoTokenizer.from_pretrained(model_name) ``` </tf> </frameworkcontent> 指定したモデルとトークナイザを[`pipeline`]に設定し、今度はフランス語のテキストに`classifier`を適用できます: ```py >>> classifier = pipeline("sentiment-analysis", model=model, tokenizer=tokenizer) >>> classifier("Nous sommes très heureux de vous présenter la bibliothèque 🤗 Transformers.") [{'label': '5 stars', 'score': 0.7273}] ``` もし、あなたのユースケースに適したモデルが見つからない場合、事前学習済みモデルをあなたのデータでファインチューニングする必要があります。 ファインチューニングの方法については、[ファインチューニングのチュートリアル](./training)をご覧ください。 
最後に、ファインチューニングした事前学習済みモデルを共有し、コミュニティと共有ハブで共有することを検討してください。これにより、機械学習を民主化する手助けができます! 🤗 ## AutoClass <Youtube id="AhChOFRegn4"/> [`AutoModelForSequenceClassification`] および [`AutoTokenizer`] クラスは、上記で使用した [`pipeline`] を駆動するために協力して動作します。 [AutoClass](./model_doc/auto) は、事前学習済みモデルのアーキテクチャをその名前またはパスから自動的に取得するショートカットです。 適切な `AutoClass` を選択し、それに関連する前処理クラスを選択するだけで済みます。 前のセクションからの例に戻り、`AutoClass` を使用して [`pipeline`] の結果を再現する方法を見てみましょう。 ### AutoTokenizer トークナイザはテキストをモデルの入力として使用できる数値の配列に前処理する役割を果たします。 トークナイゼーションプロセスには、単語をどのように分割するかや、単語をどのレベルで分割するかといった多くのルールがあります (トークナイゼーションについての詳細は [トークナイザサマリー](./tokenizer_summary) をご覧ください)。 最も重要なことは、モデルが事前学習済みになったときと同じトークナイゼーションルールを使用するために、同じモデル名でトークナイザをインスタンス化する必要があることです。 [`AutoTokenizer`] を使用してトークナイザをロードします: ```python >>> from transformers import AutoTokenizer >>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment" >>> tokenizer = AutoTokenizer.from_pretrained(model_name) ``` Pass your text to the tokenizer: ```python >>> encoding = tokenizer("私たちは🤗 Transformersライブラリをお見せできてとても嬉しいです。") >>> print(encoding) {'input_ids': [101, 11312, 10320, 12495, 19308, 10114, 11391, 10855, 10103, 100, 58263, 13299, 119, 102], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]} ``` トークナイザは、次の情報を含む辞書を返します: - [input_ids](./glossary#input-ids): トークンの数値表現。 - [attention_mask](.glossary#attention-mask): どのトークンにアテンションを向けるかを示します。 トークナイザはまた、入力のリストを受け入れ、一様な長さのバッチを返すためにテキストをパディングおよび切り詰めることができます。 <frameworkcontent> <pt> ```py >>> pt_batch = tokenizer( ... ["🤗 Transformersライブラリをお見せできて非常に嬉しいです。", "嫌いではないことを願っています。"], ... padding=True, ... truncation=True, ... max_length=512, ... return_tensors="pt", ... ) ``` </pt> <tf> ```py >>> tf_batch = tokenizer( ... ["We are very happy to show you the 🤗 Transformers library.", "We hope you don't hate it."], ... padding=True, ... truncation=True, ... max_length=512, ... return_tensors="tf", ... 
) ``` </tf> </frameworkcontent> <Tip> [前処理](./preprocessing)チュートリアルをご覧いただき、トークナイゼーションの詳細や、[`AutoImageProcessor`]、[`AutoFeatureExtractor`]、[`AutoProcessor`]を使用して画像、オーディオ、およびマルチモーダル入力を前処理する方法について詳しく説明されているページもご覧ください。 </Tip> ### AutoModel <frameworkcontent> <pt> 🤗 Transformersは事前学習済みインスタンスを簡単に統一的にロードする方法を提供します。 これは、[`AutoTokenizer`]をロードするのと同じように[`AutoModel`]をロードできることを意味します。 タスクに適した[`AutoModel`]を選択する以外の違いはありません。 テキスト(またはシーケンス)分類の場合、[`AutoModelForSequenceClassification`]をロードする必要があります: ```py >>> from transformers import AutoModelForSequenceClassification >>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment" >>> pt_model = AutoModelForSequenceClassification.from_pretrained(model_name) ``` <Tip> [`AutoModel`]クラスでサポートされているタスクに関する詳細については、[タスクの概要](./task_summary)を参照してください。 </Tip> 今、前処理済みのバッチを直接モデルに渡します。辞書を展開するだけで、`**`を追加する必要があります: ```python >>> pt_outputs = pt_model(**pt_batch) ``` モデルは、`logits`属性に最終的なアクティベーションを出力します。 `logits`にsoftmax関数を適用して確率を取得します: ```py >>> from torch import nn >>> pt_predictions = nn.functional.softmax(pt_outputs.logits, dim=-1) >>> print(pt_predictions) tensor([[0.0021, 0.0018, 0.0115, 0.2121, 0.7725], [0.2084, 0.1826, 0.1969, 0.1755, 0.2365]], grad_fn=<SoftmaxBackward0>) ``` </pt> <tf> 🤗 Transformersは事前学習済みインスタンスをロードするためのシンプルで統一された方法を提供します。 これは、[`TFAutoModel`]を[`AutoTokenizer`]をロードするのと同じようにロードできることを意味します。 唯一の違いは、タスクに適した[`TFAutoModel`]を選択することです。 テキスト(またはシーケンス)分類の場合、[`TFAutoModelForSequenceClassification`]をロードする必要があります: ```py >>> from transformers import TFAutoModelForSequenceClassification >>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment" >>> tf_model = TFAutoModelForSequenceClassification.from_pretrained(model_name) ``` <Tip> 詳細については、[`AutoModel`]クラスでサポートされているタスクに関する情報は、[タスクの概要](./task_summary)を参照してください。 </Tip> 次に、前処理済みのバッチを直接モデルに渡します。テンソルをそのまま渡すことができます: ```python >>> tf_outputs = tf_model(tf_batch) ``` モデルは`logits`属性に最終的なアクティベーションを出力します。`logits`にソフトマックス関数を適用して確率を取得します: ```python >>> import tensorflow as tf >>> tf_predictions = tf.nn.softmax(tf_outputs.logits, axis=-1) >>> tf_predictions # doctest: +IGNORE_RESULT ``` </tf> </frameworkcontent> <Tip> 🤗 Transformersのすべてのモデル(PyTorchまたはTensorFlow)は、最終的な活性化関数(softmaxなど)*前*のテンソルを出力します。 最終的な活性化関数は、しばしば損失と結合されているためです。モデルの出力は特別なデータクラスであり、その属性はIDEで自動補完されます。 モデルの出力は、タプルまたは辞書のように動作します(整数、スライス、または文字列でインデックスを付けることができます)。 この場合、Noneである属性は無視されます。 </Tip> ### Save a Model <frameworkcontent> <pt> モデルをファインチューニングしたら、[`PreTrainedModel.save_pretrained`]を使用してトークナイザと共に保存できます: ```py >>> pt_save_directory = "./pt_save_pretrained" >>> tokenizer.save_pretrained(pt_save_directory) # doctest: +IGNORE_RESULT >>> pt_model.save_pretrained(pt_save_directory) ``` 再びモデルを使用する準備ができたら、[`PreTrainedModel.from_pretrained`]を使用して再度ロードします: ```py >>> pt_model = AutoModelForSequenceClassification.from_pretrained("./pt_save_pretrained") ``` </pt> <tf> モデルをファインチューニングしたら、そのトークナイザを使用してモデルを保存できます。[`TFPreTrainedModel.save_pretrained`]を使用します: ```py >>> tf_save_directory = "./tf_save_pretrained" >>> tokenizer.save_pretrained(tf_save_directory) # doctest: +IGNORE_RESULT >>> tf_model.save_pretrained(tf_save_directory) ``` モデルを再度使用する準備ができたら、[`TFPreTrainedModel.from_pretrained`]を使用して再度ロードします: ```py >>> tf_model = TFAutoModelForSequenceClassification.from_pretrained("./tf_save_pretrained") ``` </tf> </frameworkcontent> 🤗 Transformersの特に素晴らしい機能の一つは、モデルを保存し、それをPyTorchモデルまたはTensorFlowモデルとして再ロードできることです。 `from_pt`または`from_tf`パラメータを使用してモデルをフレームワーク間で変換できます: <frameworkcontent> <pt> ```py >>> from transformers import AutoModel >>> tokenizer = 
AutoTokenizer.from_pretrained(pt_save_directory) >>> pt_model = AutoModelForSequenceClassification.from_pretrained(pt_save_directory, from_pt=True) ``` </pt> <tf> ```py >>> from transformers import TFAutoModel >>> tokenizer = AutoTokenizer.from_pretrained(tf_save_directory) >>> tf_model = TFAutoModelForSequenceClassification.from_pretrained(tf_save_directory, from_tf=True) ``` </tf> </frameworkcontent> ## Custom model builds モデルを構築方法を変更するには、モデルの設定クラスを変更できます。設定はモデルの属性を指定します。例えば、隠れ層の数やアテンションヘッドの数などがこれに含まれます。カスタム設定クラスからモデルを初期化する際には、ゼロから始めます。モデルの属性はランダムに初期化され、有意義な結果を得るためにモデルをトレーニングする必要があります。 最初に[`AutoConfig`]をインポートし、変更したい事前学習済みモデルをロードします。[`AutoConfig.from_pretrained`]内で、変更したい属性(例:アテンションヘッドの数)を指定できます: ```python >>> from transformers import AutoConfig >>> my_config = AutoConfig.from_pretrained("distilbert/distilbert-base-uncased", n_heads=12) ``` <frameworkcontent> <pt> [`AutoModel.from_config`]を使用してカスタム設定からモデルを作成します: ```python >>> from transformers import AutoModel >>> my_model = AutoModel.from_config(my_config) ``` </pt> <tf> カスタム構成からモデルを作成するには、[`TFAutoModel.from_config`]を使用します: ```py >>> from transformers import TFAutoModel >>> my_model = TFAutoModel.from_config(my_config) ``` </tf> </frameworkcontent> [カスタムアーキテクチャを作成](./create_a_model)ガイドを参照して、カスタム構成の詳細情報を確認してください。 ## Trainer - PyTorch向けの最適化されたトレーニングループ すべてのモデルは標準の[`torch.nn.Module`](https://pytorch.org/docs/stable/nn.html#torch.nn.Module)であるため、通常のトレーニングループで使用できます。 独自のトレーニングループを作成できますが、🤗 TransformersはPyTorch向けに[`Trainer`]クラスを提供しており、基本的なトレーニングループに加えて、 分散トレーニング、混合精度などの機能の追加を行っています。 タスクに応じて、通常は[`Trainer`]に以下のパラメータを渡します: 1. [`PreTrainedModel`]または[`torch.nn.Module`](https://pytorch.org/docs/stable/nn.html#torch.nn.Module)から始めます: ```py >>> from transformers import AutoModelForSequenceClassification >>> model = AutoModelForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased") ``` 2. [`TrainingArguments`]には、変更できるモデルのハイパーパラメータが含まれており、学習率、バッチサイズ、トレーニングエポック数などが変更できます。指定しない場合、デフォルト値が使用されます: ```py >>> from transformers import TrainingArguments >>> training_args = TrainingArguments( ... output_dir="path/to/save/folder/", ... learning_rate=2e-5, ... per_device_train_batch_size=8, ... per_device_eval_batch_size=8, ... num_train_epochs=2, ... ) ``` 3. トークナイザ、画像プロセッサ、特徴量抽出器、またはプロセッサのような前処理クラスをロードします: ```py >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilbert-base-uncased") ``` 4. データセットをロードする: ```py >>> from datasets import load_dataset >>> dataset = load_dataset("rotten_tomatoes") # doctest: +IGNORE_RESULT ``` 5. データセットをトークン化するための関数を作成します: ```python >>> def tokenize_dataset(dataset): ... return tokenizer(dataset["text"]) ``` その後、[`~datasets.Dataset.map`]を使用してデータセット全体に適用します: ```python >>> dataset = dataset.map(tokenize_dataset, batched=True) ``` 6. データセットからの例のバッチを作成するための [`DataCollatorWithPadding`]: ```py >>> from transformers import DataCollatorWithPadding >>> data_collator = DataCollatorWithPadding(tokenizer=tokenizer) ``` 次に、これらのクラスを[`Trainer`]にまとめます: ```python >>> from transformers import Trainer >>> trainer = Trainer( ... model=model, ... args=training_args, ... train_dataset=dataset["train"], ... eval_dataset=dataset["test"], ... processing_class=tokenizer, ... data_collator=data_collator, ... 
) # doctest: +SKIP ``` 訓練を開始する準備ができたら、[`~Trainer.train`]を呼び出してトレーニングを開始します: ```py >>> trainer.train() # doctest: +SKIP ``` <Tip> 翻訳や要約など、シーケンス間モデルを使用するタスクには、代わりに[`Seq2SeqTrainer`]と[`Seq2SeqTrainingArguments`]クラスを使用してください。 </Tip> [`Trainer`]内のメソッドをサブクラス化することで、トレーニングループの動作をカスタマイズできます。これにより、損失関数、オプティマイザ、スケジューラなどの機能をカスタマイズできます。サブクラス化できるメソッドの一覧については、[`Trainer`]リファレンスをご覧ください。 トレーニングループをカスタマイズする別の方法は、[Callbacks](./main_classes/callback)を使用することです。コールバックを使用して他のライブラリと統合し、トレーニングループを監視して進捗状況を報告したり、トレーニングを早期に停止したりできます。コールバックはトレーニングループ自体には何も変更を加えません。損失関数などのカスタマイズを行う場合は、[`Trainer`]をサブクラス化する必要があります。 ## Train with TensorFlow すべてのモデルは標準の[`tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model)であるため、[Keras](https://keras.io/) APIを使用してTensorFlowでトレーニングできます。 🤗 Transformersは、データセットを`tf.data.Dataset`として簡単にロードできるようにする[`~TFPreTrainedModel.prepare_tf_dataset`]メソッドを提供しており、Kerasの[`compile`](https://keras.io/api/models/model_training_apis/#compile-method)および[`fit`](https://keras.io/api/models/model_training_apis/#fit-method)メソッドを使用してトレーニングをすぐに開始できます。 1. [`TFPreTrainedModel`]または[`tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model)から始めます: ```py >>> from transformers import TFAutoModelForSequenceClassification >>> model = TFAutoModelForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased") ``` 2. トークナイザ、画像プロセッサ、特徴量抽出器、またはプロセッサのような前処理クラスをロードします: ```py >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilbert-base-uncased") ``` 3. データセットをトークナイズするための関数を作成します: ```python >>> def tokenize_dataset(dataset): ... return tokenizer(dataset["text"]) # doctest: +SKIP ``` 4. [`~datasets.Dataset.map`]を使用してデータセット全体にトークナイザを適用し、データセットとトークナイザを[`~TFPreTrainedModel.prepare_tf_dataset`]に渡します。バッチサイズを変更し、データセットをシャッフルすることもできます。 ```python >>> dataset = dataset.map(tokenize_dataset) # doctest: +SKIP >>> tf_dataset = model.prepare_tf_dataset( ... dataset["train"], batch_size=16, shuffle=True, tokenizer=tokenizer ... ) # doctest: +SKIP ``` 5. 準備ができたら、`compile`と`fit`を呼び出してトレーニングを開始できます。 Transformersモデルはすべてデフォルトのタスクに関連する損失関数を持っているため、指定しない限り、損失関数を指定する必要はありません。 ```python >>> from tensorflow.keras.optimizers import Adam >>> model.compile(optimizer=Adam(3e-5)) # 損失関数の引数は不要です! >>> model.fit(tf ``` ## What's next? 🤗 Transformersのクイックツアーを完了したら、ガイドをチェックして、カスタムモデルの作成、タスクのためのファインチューニング、スクリプトを使用したモデルのトレーニングなど、より具体的なことを学ぶことができます。🤗 Transformersのコアコンセプトについてもっと詳しく知りたい場合は、コンセプチュアルガイドを読んでみてください!
transformers/docs/source/ja/quicktour.md/0
{ "file_path": "transformers/docs/source/ja/quicktour.md", "repo_id": "transformers", "token_count": 13067 }
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Object detection [[open-in-colab]] オブジェクト検出は、画像内のインスタンス (人間、建物、車など) を検出するコンピューター ビジョン タスクです。物体検出モデルは画像を入力および出力として受け取ります 検出されたオブジェクトの境界ボックスと関連するラベルの座標。画像には複数のオブジェクトを含めることができます。 それぞれに独自の境界ボックスとラベルがあり (例: 車と建物を持つことができます)、各オブジェクトは 画像のさまざまな部分に存在する必要があります (たとえば、画像には複数の車が含まれている可能性があります)。 このタスクは、歩行者、道路標識、信号機などを検出するために自動運転で一般的に使用されます。 他のアプリケーションには、画像内のオブジェクトのカウント、画像検索などが含まれます。 このガイドでは、次の方法を学習します。 1. Finetune [DETR](https://huggingface.co/docs/transformers/model_doc/detr)、畳み込みアルゴリズムを組み合わせたモデル [CPPE-5](https://huggingface.co/datasets/cppe-5) 上のエンコーダー/デコーダー トランスフォーマーを備えたバックボーン データセット。 2. 微調整したモデルを推論に使用します。 <Tip> このタスクと互換性のあるすべてのアーキテクチャとチェックポイントを確認するには、[タスクページ](https://huggingface.co/tasks/object-detection) を確認することをお勧めします。 </Tip> 始める前に、必要なライブラリがすべてインストールされていることを確認してください。 ```bash pip install -q datasets transformers evaluate timm albumentations ``` 🤗 データセットを使用して Hugging Face Hub からデータセットをロードし、🤗 トランスフォーマーを使用してモデルをトレーニングします。 データを増強するための`albumentations`。 `timm` は現在、DETR モデルの畳み込みバックボーンをロードするために必要です。 モデルをコミュニティと共有することをお勧めします。 Hugging Face アカウントにログインして、ハブにアップロードします。 プロンプトが表示されたら、トークンを入力してログインします。 ```py >>> from huggingface_hub import notebook_login >>> notebook_login() ``` ## Load the CPPE-5 dataset [CPPE-5 データセット](https://huggingface.co/datasets/cppe-5) には、次の画像が含まれています。 新型コロナウイルス感染症のパンデミックにおける医療用個人保護具 (PPE) を識別する注釈。 データセットをロードすることから始めます。 ```py >>> from datasets import load_dataset >>> cppe5 = load_dataset("cppe-5") >>> cppe5 DatasetDict({ train: Dataset({ features: ['image_id', 'image', 'width', 'height', 'objects'], num_rows: 1000 }) test: Dataset({ features: ['image_id', 'image', 'width', 'height', 'objects'], num_rows: 29 }) }) ``` このデータセットには、1000 枚の画像を含むトレーニング セットと 29 枚の画像を含むテスト セットがすでに付属していることがわかります。 データに慣れるために、例がどのようなものかを調べてください。 ```py >>> cppe5["train"][0] {'image_id': 15, 'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=943x663 at 0x7F9EC9E77C10>, 'width': 943, 'height': 663, 'objects': {'id': [114, 115, 116, 117], 'area': [3796, 1596, 152768, 81002], 'bbox': [[302.0, 109.0, 73.0, 52.0], [810.0, 100.0, 57.0, 28.0], [160.0, 31.0, 248.0, 616.0], [741.0, 68.0, 202.0, 401.0]], 'category': [4, 4, 0, 0]}} ``` データセット内の例には次のフィールドがあります。 - `image_id`: サンプルの画像ID - `image`: 画像を含む `PIL.Image.Image` オブジェクト - `width`: 画像の幅 - `height`: 画像の高さ - `objects`: 画像内のオブジェクトの境界ボックスのメタデータを含む辞書: - `id`: アノテーションID - `area`: 境界ボックスの領域 - `bbox`: オブジェクトの境界ボックス ([COCO 形式](https://albumentations.ai/docs/getting_started/bounding_boxes_augmentation/#coco) ) - `category`: オブジェクトのカテゴリー。可能な値には、`Coverall (0)`、`Face_Shield (1)`、`Gloves (2)`、`Goggles (3)`、および `Mask (4)` が含まれます。 `bbox`フィールドが COCO 形式に従っていることに気づくかもしれません。これは DETR モデルが予期する形式です。 ただし、「オブジェクト」内のフィールドのグループ化は、DETR が必要とする注釈形式とは異なります。あなたはするであろう このデータをトレーニングに使用する前に、いくつかの前処理変換を適用する必要があります。 データをさらに深く理解するには、データセット内の例を視覚化します。 ```py >>> import 
numpy as np >>> import os >>> from PIL import Image, ImageDraw >>> image = cppe5["train"][0]["image"] >>> annotations = cppe5["train"][0]["objects"] >>> draw = ImageDraw.Draw(image) >>> categories = cppe5["train"].features["objects"].feature["category"].names >>> id2label = {index: x for index, x in enumerate(categories, start=0)} >>> label2id = {v: k for k, v in id2label.items()} >>> for i in range(len(annotations["id"])): ... box = annotations["bbox"][i] ... class_idx = annotations["category"][i] ... x, y, w, h = tuple(box) ... draw.rectangle((x, y, x + w, y + h), outline="red", width=1) ... draw.text((x, y), id2label[class_idx], fill="white") >>> image ``` <div class="flex justify-center"> <img src="https://i.imgur.com/TdaqPJO.png" alt="CPPE-5 Image Example"/> </div> 関連付けられたラベルを使用して境界ボックスを視覚化するには、データセットのメタデータからラベルを取得します。 `category`フィールド。 また、ラベル ID をラベル クラスにマッピングする辞書 (`id2label`) やその逆 (`label2id`) を作成することもできます。 これらは、後でモデルをセットアップするときに使用できます。これらのマップを含めると、共有した場合に他の人がモデルを再利用できるようになります。 ハグフェイスハブに取り付けます。 データに慣れるための最後のステップとして、潜在的な問題がないかデータを調査します。データセットに関する一般的な問題の 1 つは、 オブジェクト検出は、画像の端を越えて「伸びる」境界ボックスです。このような「暴走」境界ボックスは、 トレーニング中にエラーが発生するため、この段階で対処する必要があります。このデータセットには、この問題に関する例がいくつかあります。 このガイドでは内容をわかりやすくするために、これらの画像をデータから削除します。 ```py >>> remove_idx = [590, 821, 822, 875, 876, 878, 879] >>> keep = [i for i in range(len(cppe5["train"])) if i not in remove_idx] >>> cppe5["train"] = cppe5["train"].select(keep) ``` ## Preprocess the data モデルを微調整するには、事前トレーニングされたモデルに使用されるアプローチと正確に一致するように、使用する予定のデータを前処理する必要があります。 [`AutoImageProcessor`] は、画像データを処理して `pixel_values`、`pixel_mask`、および DETR モデルをトレーニングできる「ラベル」。画像プロセッサには、心配する必要のないいくつかの属性があります。 - `image_mean = [0.485, 0.456, 0.406 ]` - `image_std = [0.229, 0.224, 0.225]` これらは、モデルの事前トレーニング中に画像を正規化するために使用される平均と標準偏差です。これらの価値観は非常に重要です 事前にトレーニングされた画像モデルを推論または微調整するときに複製します。 微調整するモデルと同じチェックポイントからイメージ プロセッサをインスタンス化します。 ```py >>> from transformers import AutoImageProcessor >>> checkpoint = "facebook/detr-resnet-50" >>> image_processor = AutoImageProcessor.from_pretrained(checkpoint) ``` 画像を`image_processor`に渡す前に、2 つの前処理変換をデータセットに適用します。 - 画像の拡張 - DETR の期待に応えるための注釈の再フォーマット まず、モデルがトレーニング データにオーバーフィットしないようにするために、任意のデータ拡張ライブラリを使用して画像拡張を適用できます。ここでは[Albumentations](https://albumentations.ai/docs/)を使用します... このライブラリは、変換が画像に影響を与え、それに応じて境界ボックスを更新することを保証します。 🤗 データセット ライブラリのドキュメントには、詳細な [物体検出用に画像を拡張する方法に関するガイド](https://huggingface.co/docs/datasets/object_detection) が記載されています。 例としてまったく同じデータセットを使用しています。ここでも同じアプローチを適用し、各画像のサイズを (480, 480) に変更します。 水平に反転して明るくします。 ```py >>> import albumentations >>> import numpy as np >>> import torch >>> transform = albumentations.Compose( ... [ ... albumentations.Resize(480, 480), ... albumentations.HorizontalFlip(p=1.0), ... albumentations.RandomBrightnessContrast(p=1.0), ... ], ... bbox_params=albumentations.BboxParams(format="coco", label_fields=["category"]), ... ) ``` `image_processor` は、注釈が次の形式であることを期待します: `{'image_id': int, 'annotations': List[Dict]}`, ここで、各辞書は COCO オブジェクトの注釈です。 1 つの例として、注釈を再フォーマットする関数を追加してみましょう。 ```py >>> def formatted_anns(image_id, category, area, bbox): ... annotations = [] ... for i in range(0, len(category)): ... new_ann = { ... "image_id": image_id, ... "category_id": category[i], ... "isCrowd": 0, ... "area": area[i], ... "bbox": list(bbox[i]), ... } ... annotations.append(new_ann) ... return annotations ``` これで、画像と注釈の変換を組み合わせてサンプルのバッチで使用できるようになりました。 ```py >>> # transforming a batch >>> def transform_aug_ann(examples): ... image_ids = examples["image_id"] ... images, bboxes, area, categories = [], [], [], [] ... 
for image, objects in zip(examples["image"], examples["objects"]): ... image = np.array(image.convert("RGB"))[:, :, ::-1] ... out = transform(image=image, bboxes=objects["bbox"], category=objects["category"]) ... area.append(objects["area"]) ... images.append(out["image"]) ... bboxes.append(out["bboxes"]) ... categories.append(out["category"]) ... targets = [ ... {"image_id": id_, "annotations": formatted_anns(id_, cat_, ar_, box_)} ... for id_, cat_, ar_, box_ in zip(image_ids, categories, area, bboxes) ... ] ... return image_processor(images=images, annotations=targets, return_tensors="pt") ``` 🤗 Datasets [`~datasets.Dataset.with_transform`] メソッドを使用して、この前処理関数をデータセット全体に適用します。この方法が適用されるのは、 データセットの要素を読み込むときに、その場で変換します。 この時点で、データセットの例が変換後にどのようになるかを確認できます。テンソルが表示されるはずです `pixel_values`、テンソルと `pixel_mask`、および `labels` を使用します。 ```py >>> cppe5["train"] = cppe5["train"].with_transform(transform_aug_ann) >>> cppe5["train"][15] {'pixel_values': tensor([[[ 0.9132, 0.9132, 0.9132, ..., -1.9809, -1.9809, -1.9809], [ 0.9132, 0.9132, 0.9132, ..., -1.9809, -1.9809, -1.9809], [ 0.9132, 0.9132, 0.9132, ..., -1.9638, -1.9638, -1.9638], ..., [-1.5699, -1.5699, -1.5699, ..., -1.9980, -1.9980, -1.9980], [-1.5528, -1.5528, -1.5528, ..., -1.9980, -1.9809, -1.9809], [-1.5528, -1.5528, -1.5528, ..., -1.9980, -1.9809, -1.9809]], [[ 1.3081, 1.3081, 1.3081, ..., -1.8431, -1.8431, -1.8431], [ 1.3081, 1.3081, 1.3081, ..., -1.8431, -1.8431, -1.8431], [ 1.3081, 1.3081, 1.3081, ..., -1.8256, -1.8256, -1.8256], ..., [-1.3179, -1.3179, -1.3179, ..., -1.8606, -1.8606, -1.8606], [-1.3004, -1.3004, -1.3004, ..., -1.8606, -1.8431, -1.8431], [-1.3004, -1.3004, -1.3004, ..., -1.8606, -1.8431, -1.8431]], [[ 1.4200, 1.4200, 1.4200, ..., -1.6476, -1.6476, -1.6476], [ 1.4200, 1.4200, 1.4200, ..., -1.6476, -1.6476, -1.6476], [ 1.4200, 1.4200, 1.4200, ..., -1.6302, -1.6302, -1.6302], ..., [-1.0201, -1.0201, -1.0201, ..., -1.5604, -1.5604, -1.5604], [-1.0027, -1.0027, -1.0027, ..., -1.5604, -1.5430, -1.5430], [-1.0027, -1.0027, -1.0027, ..., -1.5604, -1.5430, -1.5430]]]), 'pixel_mask': tensor([[1, 1, 1, ..., 1, 1, 1], [1, 1, 1, ..., 1, 1, 1], [1, 1, 1, ..., 1, 1, 1], ..., [1, 1, 1, ..., 1, 1, 1], [1, 1, 1, ..., 1, 1, 1], [1, 1, 1, ..., 1, 1, 1]]), 'labels': {'size': tensor([800, 800]), 'image_id': tensor([756]), 'class_labels': tensor([4]), 'boxes': tensor([[0.7340, 0.6986, 0.3414, 0.5944]]), 'area': tensor([519544.4375]), 'iscrowd': tensor([0]), 'orig_size': tensor([480, 480])}} ``` 個々の画像を正常に拡張し、それらの注釈を準備しました。ただし、前処理はそうではありません。 まだ完成しています。最後のステップでは、画像をバッチ処理するためのカスタム `collat​​e_fn` を作成します。 画像 (現在は `pixel_values`) をバッチ内の最大の画像にパディングし、対応する `pixel_mask` を作成します どのピクセルが実数 (1) で、どのピクセルがパディング (0) であるかを示します。 ```py >>> def collate_fn(batch): ... pixel_values = [item["pixel_values"] for item in batch] ... encoding = image_processor.pad(pixel_values, return_tensors="pt") ... labels = [item["labels"] for item in batch] ... batch = {} ... batch["pixel_values"] = encoding["pixel_values"] ... batch["pixel_mask"] = encoding["pixel_mask"] ... batch["labels"] = labels ... return batch ``` ## Training the DETR model 前のセクションで重労働のほとんどを完了したので、モデルをトレーニングする準備が整いました。 このデータセット内の画像は、サイズを変更した後でも依然として非常に大きいです。これは、このモデルを微調整すると、 少なくとも 1 つの GPU が必要です。 トレーニングには次の手順が含まれます。 1. 前処理と同じチェックポイントを使用して、[`AutoModelForObjectDetection`] でモデルを読み込みます。 2. [`TrainingArguments`] でトレーニング ハイパーパラメータを定義します。 3. トレーニング引数をモデル、データセット、画像プロセッサ、データ照合器とともに [`Trainer`] に渡します。 4. 
[`~Trainer.train`] を呼び出してモデルを微調整します。 前処理に使用したのと同じチェックポイントからモデルをロードするときは、必ず`label2id`を渡してください。 および `id2label` マップは、以前にデータセットのメタデータから作成したものです。さらに、`ignore_mismatched_sizes=True`を指定して、既存の分類頭部を新しい分類頭部に置き換えます。 ```py >>> from transformers import AutoModelForObjectDetection >>> model = AutoModelForObjectDetection.from_pretrained( ... checkpoint, ... id2label=id2label, ... label2id=label2id, ... ignore_mismatched_sizes=True, ... ) ``` [`TrainingArguments`] で、`output_dir` を使用してモデルの保存場所を指定し、必要に応じてハイパーパラメーターを構成します。 画像列が削除されるため、未使用の列を削除しないことが重要です。画像列がないと、 `pixel_values` を作成できません。このため、`remove_unused_columns`を`False`に設定します。 ハブにプッシュしてモデルを共有したい場合は、`push_to_hub` を `True` に設定します (Hugging にサインインする必要があります) 顔に向かってモデルをアップロードします)。 ```py >>> from transformers import TrainingArguments >>> training_args = TrainingArguments( ... output_dir="detr-resnet-50_finetuned_cppe5", ... per_device_train_batch_size=8, ... num_train_epochs=10, ... fp16=True, ... save_steps=200, ... logging_steps=50, ... learning_rate=1e-5, ... weight_decay=1e-4, ... save_total_limit=2, ... remove_unused_columns=False, ... push_to_hub=True, ... ) ``` 最後に、すべてをまとめて、[`~transformers.Trainer.train`] を呼び出します。 ```py >>> from transformers import Trainer >>> trainer = Trainer( ... model=model, ... args=training_args, ... data_collator=collate_fn, ... train_dataset=cppe5["train"], ... processing_class=image_processor, ... ) >>> trainer.train() ``` `training_args`で`push_to_hub`を`True`に設定した場合、トレーニング チェックポイントは ハグフェイスハブ。トレーニングが完了したら、[`~transformers.Trainer.push_to_hub`] メソッドを呼び出して、最終モデルもハブにプッシュします。 ```py >>> trainer.push_to_hub() ``` ## Evaluate 物体検出モデルは通常、一連の <a href="https://cocodataset.org/#detection-eval">COCO スタイルの指標</a>を使用して評価されます。 既存のメトリクス実装のいずれかを使用できますが、ここでは`torchvision`のメトリクス実装を使用して最終的なメトリクスを評価します。 ハブにプッシュしたモデル。 `torchvision`エバリュエーターを使用するには、グラウンド トゥルース COCO データセットを準備する必要があります。 COCO データセットを構築するための API データを特定の形式で保存する必要があるため、最初に画像と注釈をディスクに保存する必要があります。と同じように トレーニング用にデータを準備するとき、`cppe5["test"]` からの注釈をフォーマットする必要があります。ただし、画像 そのままでいるべきです。 評価ステップには少し作業が必要ですが、大きく 3 つのステップに分けることができます。 まず、`cppe5["test"]` セットを準備します。注釈をフォーマットし、データをディスクに保存します。 ```py >>> import json >>> # format annotations the same as for training, no need for data augmentation >>> def val_formatted_anns(image_id, objects): ... annotations = [] ... for i in range(0, len(objects["id"])): ... new_ann = { ... "id": objects["id"][i], ... "category_id": objects["category"][i], ... "iscrowd": 0, ... "image_id": image_id, ... "area": objects["area"][i], ... "bbox": objects["bbox"][i], ... } ... annotations.append(new_ann) ... return annotations >>> # Save images and annotations into the files torchvision.datasets.CocoDetection expects >>> def save_cppe5_annotation_file_images(cppe5): ... output_json = {} ... path_output_cppe5 = f"{os.getcwd()}/cppe5/" ... if not os.path.exists(path_output_cppe5): ... os.makedirs(path_output_cppe5) ... path_anno = os.path.join(path_output_cppe5, "cppe5_ann.json") ... categories_json = [{"supercategory": "none", "id": id, "name": id2label[id]} for id in id2label] ... output_json["images"] = [] ... output_json["annotations"] = [] ... for example in cppe5: ... ann = val_formatted_anns(example["image_id"], example["objects"]) ... output_json["images"].append( ... { ... "id": example["image_id"], ... "width": example["image"].width, ... "height": example["image"].height, ... "file_name": f"{example['image_id']}.png", ... } ... ) ... output_json["annotations"].extend(ann) ... output_json["categories"] = categories_json ... with open(path_anno, "w") as file: ... 
json.dump(output_json, file, ensure_ascii=False, indent=4) ... for im, img_id in zip(cppe5["image"], cppe5["image_id"]): ... path_img = os.path.join(path_output_cppe5, f"{img_id}.png") ... im.save(path_img) ... return path_output_cppe5, path_anno ``` 次に、`cocoevaluator`で利用できる`CocoDetection`クラスのインスタンスを用意します。 ```py >>> import torchvision >>> class CocoDetection(torchvision.datasets.CocoDetection): ... def __init__(self, img_folder, image_processor, ann_file): ... super().__init__(img_folder, ann_file) ... self.image_processor = image_processor ... def __getitem__(self, idx): ... # read in PIL image and target in COCO format ... img, target = super(CocoDetection, self).__getitem__(idx) ... # preprocess image and target: converting target to DETR format, ... # resizing + normalization of both image and target) ... image_id = self.ids[idx] ... target = {"image_id": image_id, "annotations": target} ... encoding = self.image_processor(images=img, annotations=target, return_tensors="pt") ... pixel_values = encoding["pixel_values"].squeeze() # remove batch dimension ... target = encoding["labels"][0] # remove batch dimension ... return {"pixel_values": pixel_values, "labels": target} >>> im_processor = AutoImageProcessor.from_pretrained("devonho/detr-resnet-50_finetuned_cppe5") >>> path_output_cppe5, path_anno = save_cppe5_annotation_file_images(cppe5["test"]) >>> test_ds_coco_format = CocoDetection(path_output_cppe5, im_processor, path_anno) ``` 最後に、メトリクスをロードして評価を実行します。 ```py >>> import evaluate >>> from tqdm import tqdm >>> model = AutoModelForObjectDetection.from_pretrained("devonho/detr-resnet-50_finetuned_cppe5") >>> module = evaluate.load("ybelkada/cocoevaluate", coco=test_ds_coco_format.coco) >>> val_dataloader = torch.utils.data.DataLoader( ... test_ds_coco_format, batch_size=8, shuffle=False, num_workers=4, collate_fn=collate_fn ... ) >>> with torch.no_grad(): ... for idx, batch in enumerate(tqdm(val_dataloader)): ... pixel_values = batch["pixel_values"] ... pixel_mask = batch["pixel_mask"] ... labels = [ ... {k: v for k, v in t.items()} for t in batch["labels"] ... ] # these are in DETR format, resized + normalized ... # forward pass ... outputs = model(pixel_values=pixel_values, pixel_mask=pixel_mask) ... orig_target_sizes = torch.stack([target["orig_size"] for target in labels], dim=0) ... results = im_processor.post_process(outputs, orig_target_sizes) # convert outputs of model to Pascal VOC format (xmin, ymin, xmax, ymax) ... module.add(prediction=results, reference=labels) ... del batch >>> results = module.compute() >>> print(results) Accumulating evaluation results... DONE (t=0.08s). 
IoU metric: bbox Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.352 Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.681 Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.292 Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.168 Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.208 Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.429 Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.274 Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.484 Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.501 Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.191 Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.323 Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.590 ``` これらの結果は、[`~transformers.TrainingArguments`] のハイパーパラメータを調整することでさらに改善できます。試してごらん! ## Inference DETR モデルを微調整して評価し、Hugging Face Hub にアップロードしたので、それを推論に使用できます。 推論用に微調整されたモデルを試す最も簡単な方法は、それを [`pipeline`] で使用することです。パイプラインをインスタンス化する モデルを使用してオブジェクトを検出し、それに画像を渡します。 ```py >>> from transformers import pipeline >>> import requests >>> url = "https://i.imgur.com/2lnWoly.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> obj_detector = pipeline("object-detection", model="devonho/detr-resnet-50_finetuned_cppe5") >>> obj_detector(image) ``` 必要に応じて、パイプラインの結果を手動で複製することもできます。 ```py >>> image_processor = AutoImageProcessor.from_pretrained("devonho/detr-resnet-50_finetuned_cppe5") >>> model = AutoModelForObjectDetection.from_pretrained("devonho/detr-resnet-50_finetuned_cppe5") >>> with torch.no_grad(): ... inputs = image_processor(images=image, return_tensors="pt") ... outputs = model(**inputs) ... target_sizes = torch.tensor([image.size[::-1]]) ... results = image_processor.post_process_object_detection(outputs, threshold=0.5, target_sizes=target_sizes)[0] >>> for score, label, box in zip(results["scores"], results["labels"], results["boxes"]): ... box = [round(i, 2) for i in box.tolist()] ... print( ... f"Detected {model.config.id2label[label.item()]} with confidence " ... f"{round(score.item(), 3)} at location {box}" ... ) Detected Coverall with confidence 0.566 at location [1215.32, 147.38, 4401.81, 3227.08] Detected Mask with confidence 0.584 at location [2449.06, 823.19, 3256.43, 1413.9] ``` 結果をプロットしてみましょう: ```py >>> draw = ImageDraw.Draw(image) >>> for score, label, box in zip(results["scores"], results["labels"], results["boxes"]): ... box = [round(i, 2) for i in box.tolist()] ... x, y, x2, y2 = tuple(box) ... draw.rectangle((x, y, x2, y2), outline="red", width=1) ... draw.text((x, y), model.config.id2label[label.item()], fill="white") >>> image ``` <div class="flex justify-center"> <img src="https://i.imgur.com/4QZnf9A.png" alt="Object detection result on a new image"/> </div>
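なお、GPU が利用可能な環境では、モデルと入力を同じデバイスに移動すると手動推論も高速になります。以下は上記の手動推論コードをデバイス対応にした最小限のスケッチです(チェックポイント名と `threshold` は上記の例と同じ値を想定しています)。

```py
import torch
from transformers import AutoImageProcessor, AutoModelForObjectDetection

device = "cuda" if torch.cuda.is_available() else "cpu"

image_processor = AutoImageProcessor.from_pretrained("devonho/detr-resnet-50_finetuned_cppe5")
model = AutoModelForObjectDetection.from_pretrained("devonho/detr-resnet-50_finetuned_cppe5").to(device)

# `image` は上の例と同様に読み込んだ PIL 画像を想定しています
with torch.no_grad():
    inputs = image_processor(images=image, return_tensors="pt").to(device)
    outputs = model(**inputs)
    # 後処理で使用するテンソルも同じデバイスに置きます
    target_sizes = torch.tensor([image.size[::-1]], device=device)
    results = image_processor.post_process_object_detection(outputs, threshold=0.5, target_sizes=target_sizes)[0]
```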
transformers/docs/source/ja/tasks/object_detection.md/0
{ "file_path": "transformers/docs/source/ja/tasks/object_detection.md", "repo_id": "transformers", "token_count": 12694 }
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Export to TFLite [TensorFlow Lite](https://www.tensorflow.org/lite/guide)は、モバイルフォン、組み込みシステム、およびモノのインターネット(IoT)デバイスなど、リソースに制約のあるデバイスに機械学習モデルを展開するための軽量なフレームワークです。TFLiteは、計算能力、メモリ、および電力消費が限られているこれらのデバイス上でモデルを効率的に最適化して実行するために設計されています。 TensorFlow Liteモデルは、`.tflite`ファイル拡張子で識別される特別な効率的なポータブル形式で表されます。 🤗 Optimumは、🤗 TransformersモデルをTFLiteにエクスポートするための機能を`exporters.tflite`モジュールを介して提供しています。サポートされているモデルアーキテクチャのリストについては、[🤗 Optimumのドキュメント](https://huggingface.co/docs/optimum/exporters/tflite/overview)をご参照ください。 モデルをTFLiteにエクスポートするには、必要な依存関係をインストールしてください: ```bash pip install optimum[exporters-tf] ``` すべての利用可能な引数を確認するには、[🤗 Optimumドキュメント](https://huggingface.co/docs/optimum/main/en/exporters/tflite/usage_guides/export_a_model)を参照するか、コマンドラインでヘルプを表示してください: ```bash optimum-cli export tflite --help ``` 🤗 Hubからモデルのチェックポイントをエクスポートするには、例えば `google-bert/bert-base-uncased` を使用する場合、次のコマンドを実行します: ```bash optimum-cli export tflite --model google-bert/bert-base-uncased --sequence_length 128 bert_tflite/ ``` 進行状況を示すログが表示され、生成された `model.tflite` が保存された場所も表示されるはずです: ```bash Validating TFLite model... -[✓] TFLite model output names match reference model (logits) - Validating TFLite Model output "logits": -[✓] (1, 128, 30522) matches (1, 128, 30522) -[x] values not close enough, max diff: 5.817413330078125e-05 (atol: 1e-05) The TensorFlow Lite export succeeded with the warning: The maximum absolute difference between the output of the reference model and the TFLite exported model is not within the set tolerance 1e-05: - logits: max diff = 5.817413330078125e-05. The exported model was saved at: bert_tflite ``` 上記の例は🤗 Hubからチェックポイントをエクスポートする方法を示しています。ローカルモデルをエクスポートする場合、まずモデルの重みファイルとトークナイザファイルを同じディレクトリ(`local_path`)に保存したことを確認してください。CLIを使用する場合、🤗 Hubのチェックポイント名の代わりに`model`引数に`local_path`を渡します。
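たとえば、ローカルディレクトリ(ここでは仮に `./local_model_dir/` とします)に保存したモデルをエクスポートする場合、上記のコマンドの `model` 引数をそのパスに置き換えるだけです。

```bash
optimum-cli export tflite --model ./local_model_dir/ --sequence_length 128 bert_tflite/
```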
transformers/docs/source/ja/tflite.md/0
{ "file_path": "transformers/docs/source/ja/tflite.md", "repo_id": "transformers", "token_count": 1416 }
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 일반 유틸리티 (General Utilities) [[general-utilities]] 이 페이지는 `utils.py` 파일에 있는 Transformers의 일반 유틸리티 함수들을 나열합니다. 이 함수들 대부분은 라이브러리의 일반적인 코드를 연구할 때만 유용합니다. ## Enums 및 namedtuples [[transformers.utils.ExplicitEnum]] [[autodoc]] utils.ExplicitEnum [[autodoc]] utils.PaddingStrategy [[autodoc]] utils.TensorType ## 특수 데코레이터 (Special Decorators) [[transformers.add_start_docstrings]] [[autodoc]] utils.add_start_docstrings [[autodoc]] utils.add_start_docstrings_to_model_forward [[autodoc]] utils.add_end_docstrings [[autodoc]] utils.add_code_sample_docstrings [[autodoc]] utils.replace_return_docstrings ## 특수 속성 (Special Properties) [[transformers.utils.cached_property]] [[autodoc]] utils.cached_property ## 기타 유틸리티 [[transformers.utils._LazyModule]] [[autodoc]] utils._LazyModule
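이 유틸리티들은 대부분 라이브러리 내부에서 사용되지만, 일부 열거형은 문자열 대신 사용자 코드에서 그대로 전달할 수도 있습니다. 아래는 `PaddingStrategy`와 `TensorType`을 토크나이저 호출에 사용하는 간단한 스케치입니다 (체크포인트 이름은 예시로 가정한 것입니다).

```python
from transformers import AutoTokenizer
from transformers.utils import PaddingStrategy, TensorType

tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")

batch = tokenizer(
    ["first sentence", "a slightly longer second sentence"],
    padding=PaddingStrategy.LONGEST,    # 문자열 "longest"와 동일합니다
    return_tensors=TensorType.PYTORCH,  # 문자열 "pt"와 동일합니다
)
print(batch["input_ids"].shape)
```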
transformers/docs/source/ko/internal/file_utils.md/0
{ "file_path": "transformers/docs/source/ko/internal/file_utils.md", "repo_id": "transformers", "token_count": 665 }
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 특성 추출기 [[feature-extractor]] 특성 추출기는 오디오 또는 비전 모델을 위한 입력 특성을 준비하는 역할을 합니다. 여기에는 시퀀스에서 특성을 추출하는 작업(예를 들어, 오디오 파일을 전처리하여 Log-Mel 스펙트로그램 특성을 생성하는 것), 이미지에서 특성을 추출하는 작업(예를 들어, 이미지 파일을 자르는 것)이 포함됩니다. 뿐만 아니라 패딩, 정규화 및 NumPy, PyTorch, TensorFlow 텐서로의 변환도 포함됩니다. ## FeatureExtractionMixin [[transformers.FeatureExtractionMixin]] [[autodoc]] feature_extraction_utils.FeatureExtractionMixin - from_pretrained - save_pretrained ## SequenceFeatureExtractor [[transformers.SequenceFeatureExtractor]] [[autodoc]] SequenceFeatureExtractor - pad ## BatchFeature [[transformers.BatchFeature]] [[autodoc]] BatchFeature ## ImageFeatureExtractionMixin [[transformers.ImageFeatureExtractionMixin]] [[autodoc]] image_utils.ImageFeatureExtractionMixin
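아래는 사전 학습된 특성 추출기를 불러와 길이가 서로 다른 두 오디오 입력을 패딩하고 PyTorch 텐서로 변환하는 최소 예시입니다 (체크포인트 이름은 예시로 가정한 것입니다).

```python
import numpy as np
from transformers import AutoFeatureExtractor

feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")

# 길이가 서로 다른 두 개의 예시 오디오 신호
raw_audio = [np.zeros(16000, dtype=np.float32), np.zeros(24000, dtype=np.float32)]

inputs = feature_extractor(raw_audio, sampling_rate=16000, padding=True, return_tensors="pt")
print(inputs["input_values"].shape)  # torch.Size([2, 24000]) - 짧은 시퀀스가 패딩됩니다
```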
transformers/docs/source/ko/main_classes/feature_extractor.md/0
{ "file_path": "transformers/docs/source/ko/main_classes/feature_extractor.md", "repo_id": "transformers", "token_count": 779 }
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # BERT[[BERT]] <div class="flex flex-wrap space-x-1"> <a href="https://huggingface.co/models?filter=bert"> <img alt="Models" src="https://img.shields.io/badge/All_model_pages-bert-blueviolet"> </a> <a href="https://huggingface.co/spaces/docs-demos/bert-base-uncased"> <img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue"> </a> </div> ## 개요[[Overview]] BERT 모델은 Jacob Devlin. Ming-Wei Chang, Kenton Lee, Kristina Touranova가 제안한 논문 [BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding](https://arxiv.org/abs/1810.04805)에서 소개되었습니다. BERT는 사전 학습된 양방향 트랜스포머로, Toronto Book Corpus와 Wikipedia로 구성된 대규모 코퍼스에서 마스킹된 언어 모델링과 다음 문장 예측(Next Sentence Prediction) 목표를 결합해 학습되었습니다. 해당 논문의 초록입니다: *우리는 BERT(Bidirectional Encoder Representations from Transformers)라는 새로운 언어 표현 모델을 소개합니다. 최근의 다른 언어 표현 모델들과 달리, BERT는 모든 계층에서 양방향으로 양쪽 문맥을 조건으로 사용하여 비지도 학습된 텍스트에서 깊이 있는 양방향 표현을 사전 학습하도록 설계되었습니다. 그 결과, 사전 학습된 BERT 모델은 추가적인 출력 계층 하나만으로 질문 응답, 언어 추론과 같은 다양한 작업에서 미세 조정될 수 있으므로, 특정 작업을 위해 아키텍처를 수정할 필요가 없습니다.* *BERT는 개념적으로 단순하면서도 실증적으로 강력한 모델입니다. BERT는 11개의 자연어 처리 과제에서 새로운 최고 성능을 달성했으며, GLUE 점수를 80.5% (7.7% 포인트 절대 개선)로, MultiNLI 정확도를 86.7% (4.6% 포인트 절대 개선), SQuAD v1.1 질문 응답 테스트에서 F1 점수를 93.2 (1.5% 포인트 절대 개선)로, SQuAD v2.0에서 F1 점수를 83.1 (5.1% 포인트 절대 개선)로 향상시켰습니다.* 이 모델은 [thomwolf](https://huggingface.co/thomwolf)가 기여하였습니다. 원본 코드는 [여기](https://github.com/google-research/bert)에서 확인할 수 있습니다. ## 사용 팁[[Usage tips]] - BERT는 절대 위치 임베딩을 사용하는 모델이므로 입력을 왼쪽이 아니라 오른쪽에서 패딩하는 것이 일반적으로 권장됩니다. - BERT는 마스킹된 언어 모델(MLM)과 Next Sentence Prediction(NSP) 목표로 학습되었습니다. 이는 마스킹된 토큰 예측과 전반적인 자연어 이해(NLU)에 뛰어나지만, 텍스트 생성에는 최적화되어있지 않습니다. - BERT의 사전 학습 과정에서는 입력 데이터를 무작위로 마스킹하여 일부 토큰을 마스킹합니다. 전체 토큰 중 약 15%가 다음과 같은 방식으로 마스킹됩니다: * 80% 확률로 마스크 토큰으로 대체 * 10% 확률로 임의의 다른 토큰으로 대체 * 10% 확률로 원래 토큰 그대로 유지 - 모델의 주요 목표는 원본 문장을 예측하는 것이지만, 두 번째 목표가 있습니다: 입력으로 문장 A와 B (사이에는 구분 토큰이 있음)가 주어집니다. 이 문장 쌍이 연속될 확률은 50%이며, 나머지 50%는 서로 무관한 문장들입니다. 모델은 이 두 문장이 아닌지를 예측해야 합니다. ### Scaled Dot Product Attention(SDPA) 사용하기 [[Using Scaled Dot Product Attention (SDPA)]] Pytorch는 `torch.nn.functional`의 일부로 Scaled Dot Product Attention(SDPA) 연산자를 기본적으로 제공합니다. 이 함수는 입력과 하드웨어에 따라 여러 구현 방식을 사용할 수 있습니다. 자세한 내용은 [공식 문서](https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html)나 [GPU Inference](https://huggingface.co/docs/transformers/main/en/perf_infer_gpu_one#pytorch-scaled-dot-product-attention)에서 확인할 수 있습니다. `torch>=2.1.1`에서는 구현이 가능한 경우 SDPA가 기본적으로 사용되지만, `from_pretrained()`함수에서 `attn_implementation="sdpa"`를 설정하여 SDPA를 명시적으로 사용하도록 지정할 수도 있습니다. ``` from transformers import BertModel model = BertModel.from_pretrained("bert-base-uncased", torch_dtype=torch.float16, attn_implementation="sdpa") ... 
``` 최적 성능 향상을 위해 모델을 반정밀도(예: `torch.float16` 또는 `torch.bfloat16`)로 불러오는 것을 권장합니다. 로컬 벤치마크 (A100-80GB, CPUx12, RAM 96.6GB, PyTorch 2.2.0, OS Ubuntu 22.04)에서 `float16`을 사용해 학습 및 추론을 수행한 결과, 다음과 같은 속도 향상이 관찰되었습니다. #### 학습 [[Training]] |batch_size|seq_len|Time per batch (eager - s)|Time per batch (sdpa - s)|Speedup (%)|Eager peak mem (MB)|sdpa peak mem (MB)|Mem saving (%)| |----------|-------|--------------------------|-------------------------|-----------|-------------------|------------------|--------------| |4 |256 |0.023 |0.017 |35.472 |939.213 |764.834 |22.800 | |4 |512 |0.023 |0.018 |23.687 |1970.447 |1227.162 |60.569 | |8 |256 |0.023 |0.018 |23.491 |1594.295 |1226.114 |30.028 | |8 |512 |0.035 |0.025 |43.058 |3629.401 |2134.262 |70.054 | |16 |256 |0.030 |0.024 |25.583 |2874.426 |2134.262 |34.680 | |16 |512 |0.064 |0.044 |46.223 |6964.659 |3961.013 |75.830 | #### 추론 [[Inference]] |batch_size|seq_len|Per token latency eager (ms)|Per token latency SDPA (ms)|Speedup (%)|Mem eager (MB)|Mem BT (MB)|Mem saved (%)| |----------|-------|----------------------------|---------------------------|-----------|--------------|-----------|-------------| |1 |128 |5.736 |4.987 |15.022 |282.661 |282.924 |-0.093 | |1 |256 |5.689 |4.945 |15.055 |298.686 |298.948 |-0.088 | |2 |128 |6.154 |4.982 |23.521 |314.523 |314.785 |-0.083 | |2 |256 |6.201 |4.949 |25.303 |347.546 |347.033 |0.148 | |4 |128 |6.049 |4.987 |21.305 |378.895 |379.301 |-0.107 | |4 |256 |6.285 |5.364 |17.166 |443.209 |444.382 |-0.264 | ## 자료[[Resources]] BERT를 시작하는 데 도움이 되는 Hugging Face와 community 자료 목록(🌎로 표시됨) 입니다. 여기에 포함될 자료를 제출하고 싶다면 PR(Pull Request)를 열어주세요. 리뷰 해드리겠습니다! 자료는 기존 자료를 복제하는 대신 새로운 내용을 담고 있어야 합니다. <PipelineTag pipeline="text-classification"/> - [BERT 텍스트 분류 (다른 언어로)](https://www.philschmid.de/bert-text-classification-in-a-different-language)에 대한 블로그 포스트. - [다중 레이블 텍스트 분류를 위한 BERT (및 관련 모델) 미세 조정](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/BERT/Fine_tuning_BERT_(and_friends)_for_multi_label_text_classification.ipynb)에 대한 노트북. - [PyTorch를 이용해 BERT를 다중 레이블 분류를 위해 미세 조정하는 방법](htt기ps://colab.research.google.com/github/abhimishra91/transformers-tutorials/blob/master/transformers_multi_label_classification.ipynb)에 대한 노트북. 🌎 - [BERT로 EncoderDecoder 모델을 warm-start하여 요약하기](https://colab.research.google.com/github/patrickvonplaten/notebooks/blob/master/BERT2BERT_for_CNN_Dailymail.ipynb)에 대한 노트북. - [`BertForSequenceClassification`]이 [예제 스크립트](https://github.com/huggingface/transformers/tree/main/examples/pytorch/text-classification)와 [노트북](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/text_classification.ipynb)에서 지원됩니다. - [`TFBertForSequenceClassification`]이 [예제 스크립트](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/text-classification)와 [노트북](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/text_classification-tf.ipynb)에서 지원됩니다. - [`FlaxBertForSequenceClassification`]이 [예제 스크립트](https://github.com/huggingface/transformers/tree/main/examples/flax/text-classification)와 [노트북](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/text_classification_flax.ipynb)에서 지원됩니다. - [텍스트 분류 작업 가이드](../tasks/sequence_classification) <PipelineTag pipeline="token-classification"/> - [Keras와 함께 Hugging Face Transformers를 사용하여 비영리 BERT를 개체명 인식(NER)용으로 미세 조정하는 방법](https://www.philschmid.de/huggingface-transformers-keras-tf)에 대한 블로그 포스트. 
- [BERT를 개체명 인식을 위해 미세 조정하기](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/BERT/Custom_Named_Entity_Recognition_with_BERT_only_first_wordpiece.ipynb)에 대한 노트북. 각 단어의 첫 번째 wordpiece에만 레이블을 지정하여 학습하는 방법을 설명합니다. 모든 wordpiece에 레이블을 전파하는 방법은 [이 버전](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/BERT/Custom_Named_Entity_Recognition_with_BERT.ipynb)에서 확인할 수 있습니다. - [`BertForTokenClassification`]이 [예제 스크립트](https://github.com/huggingface/transformers/tree/main/examples/pytorch/token-classification)와 [노트북](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/token_classification.ipynb)에서 지원됩니다. - [`TFBertForTokenClassification`]이 [예제 스크립트](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/token-classification)와 [노트북](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/token_classification-tf.ipynb)에서 지원됩니다. - [`FlaxBertForTokenClassification`]이 [예제 스크립트](https://github.com/huggingface/transformers/tree/main/examples/flax/token-classification)에서 지원됩니다. - 🤗 Hugging Face 코스의 [토큰 분류 챕터](https://huggingface.co/course/chapter7/2?fw=pt). - [토큰 분류 작업 가이드](../tasks/token_classification) <PipelineTag pipeline="fill-mask"/> - [`BertForMaskedLM`]이 [예제 스크립트](https://github.com/huggingface/transformers/tree/main/examples/pytorch/language-modeling#robertabertdistilbert-and-masked-language-modeling)와 [노트북](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling.ipynb)에서 지원됩니다. - [`TFBertForMaskedLM`]이 [예제 스크립트](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/language-modeling#run_mlmpy) 와 [노트북](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling-tf.ipynb)에서 지원됩니다. - [`FlaxBertForMaskedLM`]이 [예제 스크립트](https://github.com/huggingface/transformers/tree/main/examples/flax/language-modeling#masked-language-modeling)와 [노트북](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/masked_language_modeling_flax.ipynb)에서 지원됩니다. - 🤗 Hugging Face 코스의 [마스킹된 언어 모델링 챕터](https://huggingface.co/course/chapter7/3?fw=pt). - [마스킹된 언어 모델링 작업 가이드](../tasks/masked_language_modeling) <PipelineTag pipeline="question-answering"/> - [`BertForQuestionAnswering`]이 [예제 스크립트](https://github.com/huggingface/transformers/tree/main/examples/pytorch/question-answering)와 [노트북](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/question_answering.ipynb)에서 지원됩니다. - [`TFBertForQuestionAnswering`]이 [예제 스크립트](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/question-answering) 와 [노트북](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/question_answering-tf.ipynb)에서 지원됩니다. - [`FlaxBertForQuestionAnswering`]이 [예제 스크립트](https://github.com/huggingface/transformers/tree/main/examples/flax/question-answering)에서 지원됩니다. - 🤗 Hugging Face 코스의 [질문 답변 챕터](https://huggingface.co/course/chapter7/7?fw=pt). - [질문 답변 작업 가이드](../tasks/question_answering) **다중 선택** - [`BertForMultipleChoice`]이 [예제 스크립트](https://github.com/huggingface/transformers/tree/main/examples/pytorch/multiple-choice)와 [노트북](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/multiple_choice.ipynb)에서 지원됩니다. 
- [`TFBertForMultipleChoice`]이 [에제 스크립트](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/multiple-choice)와 [노트북](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/multiple_choice-tf.ipynb)에서 지원됩니다. - [다중 선택 작업 가이드](../tasks/multiple_choice) ⚡️ **추론** - [Hugging Face Transformers와 AWS Inferentia를 사용하여 BERT 추론을 가속화하는 방법](https://huggingface.co/blog/bert-inferentia-sagemaker)에 대한 블로그 포스트. - [GPU에서 DeepSpeed-Inference로 BERT 추론을 가속화하는 방법](https://www.philschmid.de/bert-deepspeed-inference)에 대한 블로그 포스트. ⚙️ **사전 학습** - [Hugging Face Optimum으로 Transformers를 ONMX로 변환하는 방법](https://www.philschmid.de/pre-training-bert-habana)에 대한 블로그 포스트. 🚀 **배포** - [Hugging Face Optimum으로 Transformers를 ONMX로 변환하는 방법](https://www.philschmid.de/convert-transformers-to-onnx)에 대한 블로그 포스트. - [AWS에서 Hugging Face Transformers를 위한 Habana Gaudi 딥러닝 환경 설정 방법](https://www.philschmid.de/getting-started-habana-gaudi#conclusion)에 대한 블로그 포스트. - [Hugging Face Transformers, Amazon SageMaker 및 Terraform 모듈을 이용한 BERT 자동 확장](https://www.philschmid.de/terraform-huggingface-amazon-sagemaker-advanced)에 대한 블로그 포스트. - [Hugging Face, AWS Lambda, Docker를 활용하여 서버리스 BERT 설정하는 방법](https://www.philschmid.de/serverless-bert-with-huggingface-aws-lambda-docker)에 대한 블로그 포스트. - [Amazon SageMaker와 Training Compiler를 사용하여 Hugging Face Transformers에서 BERT 미세 조정하는 방법](https://www.philschmid.de/huggingface-amazon-sagemaker-training-compiler)에 대한 블로그. - [Amazon SageMaker를 사용한 Transformers와 BERT의 작업별 지식 증류](https://www.philschmid.de/knowledge-distillation-bert-transformers)에 대한 블로그 포스트. ## BertConfig [[autodoc]] BertConfig - all ## BertTokenizer [[autodoc]] BertTokenizer - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences - save_vocabulary <frameworkcontent> <pt> ## BertTokenizerFast [[autodoc]] BertTokenizerFast </pt> <tf> ## TFBertTokenizer [[autodoc]] TFBertTokenizer </tf> </frameworkcontent> ## Bert specific outputs [[autodoc]] models.bert.modeling_bert.BertForPreTrainingOutput [[autodoc]] models.bert.modeling_tf_bert.TFBertForPreTrainingOutput [[autodoc]] models.bert.modeling_flax_bert.FlaxBertForPreTrainingOutput <frameworkcontent> <pt> ## BertModel [[autodoc]] BertModel - forward ## BertForPreTraining [[autodoc]] BertForPreTraining - forward ## BertLMHeadModel [[autodoc]] BertLMHeadModel - forward ## BertForMaskedLM [[autodoc]] BertForMaskedLM - forward ## BertForNextSentencePrediction [[autodoc]] BertForNextSentencePrediction - forward ## BertForSequenceClassification [[autodoc]] BertForSequenceClassification - forward ## BertForMultipleChoice [[autodoc]] BertForMultipleChoice - forward ## BertForTokenClassification [[autodoc]] BertForTokenClassification - forward ## BertForQuestionAnswering [[autodoc]] BertForQuestionAnswering - forward </pt> <tf> ## TFBertModel [[autodoc]] TFBertModel - call ## TFBertForPreTraining [[autodoc]] TFBertForPreTraining - call ## TFBertModelLMHeadModel [[autodoc]] TFBertLMHeadModel - call ## TFBertForMaskedLM [[autodoc]] TFBertForMaskedLM - call ## TFBertForNextSentencePrediction [[autodoc]] TFBertForNextSentencePrediction - call ## TFBertForSequenceClassification [[autodoc]] TFBertForSequenceClassification - call ## TFBertForMultipleChoice [[autodoc]] TFBertForMultipleChoice - call ## TFBertForTokenClassification [[autodoc]] TFBertForTokenClassification - call ## TFBertForQuestionAnswering [[autodoc]] TFBertForQuestionAnswering - call </tf> <jax> ## FlaxBertModel [[autodoc]] FlaxBertModel - __call__ ## 
FlaxBertForPreTraining [[autodoc]] FlaxBertForPreTraining - __call__ ## FlaxBertForCausalLM [[autodoc]] FlaxBertForCausalLM - __call__ ## FlaxBertForMaskedLM [[autodoc]] FlaxBertForMaskedLM - __call__ ## FlaxBertForNextSentencePrediction [[autodoc]] FlaxBertForNextSentencePrediction - __call__ ## FlaxBertForSequenceClassification [[autodoc]] FlaxBertForSequenceClassification - __call__ ## FlaxBertForMultipleChoice [[autodoc]] FlaxBertForMultipleChoice - __call__ ## FlaxBertForTokenClassification [[autodoc]] FlaxBertForTokenClassification - __call__ ## FlaxBertForQuestionAnswering [[autodoc]] FlaxBertForQuestionAnswering - __call__ </jax> </frameworkcontent>
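위의 사용 팁에서 설명한 80% / 10% / 10% 마스킹 방식은 [`DataCollatorForLanguageModeling`]에 기본값으로 구현되어 있습니다. 아래는 이를 직접 확인해 볼 수 있는 간단한 스케치입니다 (체크포인트 이름과 예시 문장은 가정입니다).

```python
from transformers import AutoTokenizer, DataCollatorForLanguageModeling

tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=True, mlm_probability=0.15)

batch = collator([tokenizer("The capital of France is Paris.")])
print(batch["input_ids"])  # 일부 토큰이 [MASK] 또는 무작위 토큰으로 대체됩니다
print(batch["labels"])     # 마스킹된 위치를 제외한 모든 위치는 -100 입니다
```

`Trainer`와 함께 사용하면 이 콜레이터가 매 배치마다 새로운 마스킹을 즉석에서 적용합니다.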
transformers/docs/source/ko/model_doc/bert.md/0
{ "file_path": "transformers/docs/source/ko/model_doc/bert.md", "repo_id": "transformers", "token_count": 10657 }
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # GPT-NeoX-Japanese [[gpt-neox-japanese]] ## 개요 [[overview]] 일본어를 위한 자동회귀 언어 모델인 GPT-NeoX-Japanese를 소개합니다. 이 모델은 [https://github.com/EleutherAI/gpt-neox](https://github.com/EleutherAI/gpt-neox)에서 학습되었습니다. 일본어는 많은 어휘와 히라가나, 가타카나, 한자의 조합으로 이루어진 독특한 언어입니다. 이러한 일본어의 독특한 구조를 해결하기 위해 [특수 서브워드 토크나이저](https://github.com/tanreinama/Japanese-BPEEncoder_V2)를 사용했습니다. 이 유용한 토크나이저를 오픈소스로 제공해 준 *tanreinama*에게 매우 감사드립니다. 이 모델은 Google의 [PaLM](https://ai.googleblog.com/2022/04/pathways-language-model-palm-scaling-to.html) 연구 권장 사항을 따르며, 트랜스포머 블록에서 편향 파라미터를 제거하여 모델 성능을 향상시켰습니다. 자세한 내용은 [이 기사](https://medium.com/ml-abeja/training-a-better-gpt-2-93b157662ae4)를 참조하세요. 모델 개발은 [ABEJA, Inc.](https://www.abejainc.com/)의 [신야 오타니](https://github.com/SO0529), [타카요시 마카베](https://github.com/spider-man-tm), [안주 아로라](https://github.com/Anuj040), [쿄 하토리](https://github.com/go5paopao)에 의해 주도되었습니다. 이 모델 개발 활동에 대한 자세한 내용은 [여기](https://tech-blog.abeja.asia/entry/abeja-gpt-project-202207)를 참조하세요. ### 사용 예시 [[usage-example]] `generate()` 메서드를 사용하면 GPT NeoX Japanese 모델을 통해 텍스트를 생성할 수 있습니다. ```python >>> from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseTokenizer >>> model = GPTNeoXJapaneseForCausalLM.from_pretrained("abeja/gpt-neox-japanese-2.7b") >>> tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b") >>> prompt = "人とAIが協調するためには、" >>> input_ids = tokenizer(prompt, return_tensors="pt").input_ids >>> gen_tokens = model.generate( ... input_ids, ... do_sample=True, ... temperature=0.9, ... max_length=100, ... ) >>> gen_text = tokenizer.batch_decode(gen_tokens, skip_special_tokens=True)[0] >>> print(gen_text) 人とAIが協調するためには、AIと人が共存し、AIを正しく理解する必要があります。 ``` ## 자료 [[resources]] - [일상 언어 모델링 작업 가이드 ](../tasks/language_modeling) ## GPTNeoXJapanese 설정 (GPTNeoXJapaneseConfig) [[transformers.GPTNeoXJapaneseConfig]] [[autodoc]] GPTNeoXJapaneseConfig ## GPTNeoXJapanese토큰화 (GPTNeoXJapaneseTokenizer) [[transformers.GPTNeoXJapaneseTokenizer]] [[autodoc]] GPTNeoXJapaneseTokenizer ## GPTNeoXJapaneseModel [[transformers.GPTNeoXJapaneseModel]] [[autodoc]] GPTNeoXJapaneseModel - forward ## 일상 LLM 을 위한 GPTNeoXJapanese(GPTNeoXJapaneseForCausalLM) [[transformers.GPTNeoXJapaneseForCausalLM]] [[autodoc]] GPTNeoXJapaneseForCausalLM - forward
transformers/docs/source/ko/model_doc/gpt_neox_japanese.md/0
{ "file_path": "transformers/docs/source/ko/model_doc/gpt_neox_japanese.md", "repo_id": "transformers", "token_count": 1909 }
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Swin2SR [[swin2sr]] ## 개요 [[overview]] Swin2SR 모델은 Marcos V. Conde, Ui-Jin Choi, Maxime Burchi, Radu Timofte가 제안한 논문 [Swin2SR: SwinV2 Transformer for Compressed Image Super-Resolution and Restoration](https://arxiv.org/abs/2209.11345)에서 소개되었습니다. Swin2SR은 [SwinIR](https://github.com/JingyunLiang/SwinIR/) 모델을 개선하고자 [Swin Transformer v2](swinv2) 레이어를 도입함으로써, 훈련 불안정성, 사전 훈련과 미세 조정 간의 해상도 차이, 그리고 데이터 의존성 문제를 완화시킵니다. 논문의 초록은 다음과 같습니다: *압축은 스트리밍 서비스, 가상 현실, 비디오 게임과 같은 대역폭이 제한된 시스템을 통해 이미지와 영상을 효율적으로 전송하고 저장하는 데 중요한 역할을 합니다. 하지만 압축은 필연적으로 원본 정보의 손실과 아티팩트를 초래하며, 이는 시각적 품질을 심각하게 저하시킬 수 있습니다. 이러한 이유로, 압축된 이미지의 품질 향상은 활발한 연구 주제가 되고 있습니다. 현재 대부분의 최첨단 이미지 복원 방법은 합성곱 신경망을 기반으로 하지만, SwinIR과 같은 트랜스포머 기반 방법들도 이 작업에서 인상적인 성능을 보여주고 있습니다. 이번 논문에서는 Swin Transformer V2를 사용해 SwinIR을 개선하여 이미지 초해상도 작업, 특히 압축된 입력 시나리오에서 성능을 향상시키고자 합니다. 이 방법을 통해 트랜스포머 비전 모델을 훈련할 때 발생하는 주요 문제들, 예를 들어 훈련 불안정성, 사전 훈련과 미세 조정 간 해상도 차이, 그리고 데이터 의존성을 해결할 수 있습니다. 우리는 JPEG 압축 아티팩트 제거, 이미지 초해상도(클래식 및 경량), 그리고 압축된 이미지 초해상도라는 세 가지 대표적인 작업에서 실험을 수행했습니다. 실험 결과, 우리의 방법인 Swin2SR은 SwinIR의 훈련 수렴성과 성능을 향상시킬 수 있으며, "AIM 2022 Challenge on Super-Resolution of Compressed Image and Video"에서 상위 5위 솔루션으로 선정되었습니다.* <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/swin2sr_architecture.png" alt="drawing" width="600"/> <small> Swin2SR 아키텍처. <a href="https://arxiv.org/abs/2209.11345">원본 논문</a>에서 발췌.</small> 이 모델은 [nielsr](https://huggingface.co/nielsr)가 기여하였습니다. 원본 코드는 [여기](https://github.com/mv-lab/swin2sr)에서 확인할 수 있습니다. ## 리소스 [[resources]] Swin2SR demo notebook은 [여기](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/Swin2SR)에서 확인할 수 있습니다. SwinSR을 활용한 image super-resolution demo space는 [여기](https://huggingface.co/spaces/jjourney1125/swin2sr)에서 확인할 수 있습니다. ## Swin2SRImageProcessor [[transformers.Swin2SRImageProcessor]] [[autodoc]] Swin2SRImageProcessor - preprocess ## Swin2SRConfig [[transformers.Swin2SRConfig]] [[autodoc]] Swin2SRConfig ## Swin2SRModel [[transformers.Swin2SRModel]] [[autodoc]] Swin2SRModel - forward ## Swin2SRForImageSuperResolution [[transformers.Swin2SRForImageSuperResolution]] [[autodoc]] Swin2SRForImageSuperResolution - forward
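아래는 위에 나열된 클래스들로 이미지 초해상도 추론을 수행하는 최소 예시입니다 (체크포인트 이름과 입력 이미지 경로는 예시로 가정한 것입니다).

```python
import numpy as np
import torch
from PIL import Image
from transformers import Swin2SRForImageSuperResolution, Swin2SRImageProcessor

processor = Swin2SRImageProcessor.from_pretrained("caidas/swin2SR-classical-sr-x2-64")
model = Swin2SRForImageSuperResolution.from_pretrained("caidas/swin2SR-classical-sr-x2-64")

image = Image.open("low_resolution.png").convert("RGB")

inputs = processor(image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# (C, H, W) 텐서를 0~255 범위의 PIL 이미지로 되돌립니다
output = outputs.reconstruction.squeeze().clamp(0, 1).cpu().numpy()
output = np.moveaxis(output, 0, -1)
Image.fromarray((output * 255.0).round().astype(np.uint8)).save("upscaled.png")
```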
transformers/docs/source/ko/model_doc/swin2sr.md/0
{ "file_path": "transformers/docs/source/ko/model_doc/swin2sr.md", "repo_id": "transformers", "token_count": 2502 }
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # CPU에서 효율적인 추론하기 [[efficient-inference-on-cpu]] 이 가이드는 CPU에서 대규모 모델을 효율적으로 추론하는 방법에 중점을 두고 있습니다. ## 더 빠른 추론을 위한 `BetterTransformer` [[bettertransformer-for-faster-inference]] 우리는 최근 CPU에서 텍스트, 이미지 및 오디오 모델의 빠른 추론을 위해 `BetterTransformer`를 통합했습니다. 이 통합에 대한 더 자세한 내용은 [이 문서](https://huggingface.co/docs/optimum/bettertransformer/overview)를 참조하세요. ## PyTorch JIT 모드 (TorchScript) [[pytorch-jitmode-torchscript]] TorchScript는 PyTorch 코드에서 직렬화와 최적화가 가능한 모델을 생성할때 쓰입니다. TorchScript로 만들어진 프로그램은 기존 Python 프로세스에서 저장한 뒤, 종속성이 없는 새로운 프로세스로 가져올 수 있습니다. PyTorch의 기본 설정인 `eager` 모드와 비교했을때, `jit` 모드는 연산자 결합과 같은 최적화 방법론을 통해 모델 추론에서 대부분 더 나은 성능을 제공합니다. TorchScript에 대한 친절한 소개는 [PyTorch TorchScript 튜토리얼](https://pytorch.org/tutorials/beginner/Intro_to_TorchScript_tutorial.html#tracing-modules)을 참조하세요. ### JIT 모드와 함께하는 IPEX 그래프 최적화 [[ipex-graph-optimization-with-jitmode]] Intel® Extension for PyTorch(IPEX)는 Transformers 계열 모델의 jit 모드에서 추가적인 최적화를 제공합니다. jit 모드와 더불어 Intel® Extension for PyTorch(IPEX)를 활용하시길 강력히 권장드립니다. Transformers 모델에서 자주 사용되는 일부 연산자 패턴은 이미 jit 모드 연산자 결합(operator fusion)의 형태로 Intel® Extension for PyTorch(IPEX)에서 지원되고 있습니다. Multi-head-attention, Concat Linear, Linear+Add, Linear+Gelu, Add+LayerNorm 결합 패턴 등이 이용 가능하며 활용했을 때 성능이 우수합니다. 연산자 결합의 이점은 사용자에게 고스란히 전달됩니다. 분석에 따르면, 질의 응답, 텍스트 분류 및 토큰 분류와 같은 가장 인기 있는 NLP 태스크 중 약 70%가 이러한 결합 패턴을 사용하여 Float32 정밀도와 BFloat16 혼합 정밀도 모두에서 성능상의 이점을 얻을 수 있습니다. [IPEX 그래프 최적화](https://intel.github.io/intel-extension-for-pytorch/cpu/latest/tutorials/features/graph_optimization.html)에 대한 자세한 정보를 확인하세요. #### IPEX 설치: [[ipex-installation]] IPEX 배포 주기는 PyTorch를 따라서 이루어집니다. 자세한 정보는 [IPEX 설치 방법](https://intel.github.io/intel-extension-for-pytorch/)을 확인하세요. ### JIT 모드 사용법 [[usage-of-jitmode]] 평가 또는 예측을 위해 Trainer에서 JIT 모드를 사용하려면 Trainer의 명령 인수에 `jit_mode_eval`을 추가해야 합니다. <Tip warning={true}> PyTorch의 버전이 1.14.0 이상이라면, jit 모드는 jit.trace에서 dict 입력이 지원되므로, 모든 모델의 예측과 평가가 개선될 수 있습니다. PyTorch의 버전이 1.14.0 미만이라면, 질의 응답 모델과 같이 forward 매개변수의 순서가 jit.trace의 튜플 입력 순서와 일치하는 모델에 득이 될 수 있습니다. 텍스트 분류 모델과 같이 forward 매개변수 순서가 jit.trace의 튜플 입력 순서와 다른 경우, jit.trace가 실패하며 예외가 발생합니다. 이때 예외상황을 사용자에게 알리기 위해 Logging이 사용됩니다. </Tip> [Transformers 질의 응답](https://github.com/huggingface/transformers/tree/main/examples/pytorch/question-answering)의 사용 사례 예시를 참조하세요. - CPU에서 jit 모드를 사용한 추론: <pre>python run_qa.py \ --model_name_or_path csarron/bert-base-uncased-squad-v1 \ --dataset_name squad \ --do_eval \ --max_seq_length 384 \ --doc_stride 128 \ --output_dir /tmp/ \ --no_cuda \ <b>--jit_mode_eval </b></pre> - CPU에서 IPEX와 함께 jit 모드를 사용한 추론: <pre>python run_qa.py \ --model_name_or_path csarron/bert-base-uncased-squad-v1 \ --dataset_name squad \ --do_eval \ --max_seq_length 384 \ --doc_stride 128 \ --output_dir /tmp/ \ --no_cuda \ <b>--use_ipex \</b> <b>--jit_mode_eval</b></pre>
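Trainer 밖에서도 같은 아이디어를 적용할 수 있습니다. 아래는 IPEX 최적화와 `torch.jit.trace`를 직접 사용하는 최소한의 스케치입니다 (체크포인트 이름은 예시이며, 실제 성능 이득은 하드웨어와 모델에 따라 달라집니다).

```python
import torch
import intel_extension_for_pytorch as ipex
from transformers import AutoModelForSequenceClassification, AutoTokenizer

model_id = "distilbert/distilbert-base-uncased-finetuned-sst-2-english"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSequenceClassification.from_pretrained(model_id, torchscript=True)
model.eval()

# IPEX 연산자 결합 및 그래프 최적화 적용
model = ipex.optimize(model)

inputs = tokenizer("I love this movie!", return_tensors="pt")
with torch.no_grad():
    traced = torch.jit.trace(
        model, (inputs["input_ids"], inputs["attention_mask"]), strict=False
    )
    traced = torch.jit.freeze(traced)
    logits = traced(inputs["input_ids"], inputs["attention_mask"])[0]
```

첫 몇 번의 호출은 그래프 워밍업 때문에 느릴 수 있으므로, 벤치마킹 시에는 워밍업 이후의 지연 시간을 측정하는 것이 좋습니다.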
transformers/docs/source/ko/perf_infer_cpu.md/0
{ "file_path": "transformers/docs/source/ko/perf_infer_cpu.md", "repo_id": "transformers", "token_count": 3075 }
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # EETQ [[eetq]] [EETQ](https://github.com/NetEase-FuXi/EETQ) 라이브러리는 NVIDIA GPU에 대해 int8 채널별(per-channel) 가중치 전용 양자화(weight-only quantization)를 지원합니다. 고성능 GEMM 및 GEMV 커널은 FasterTransformer 및 TensorRT-LLM에서 가져왔습니다. 교정(calibration) 데이터셋이 필요 없으며, 모델을 사전에 양자화할 필요도 없습니다. 또한, 채널별 양자화(per-channel quantization) 덕분에 정확도 저하가 미미합니다. [릴리스 페이지](https://github.com/NetEase-FuXi/EETQ/releases)에서 eetq를 설치했는지 확인하세요. ``` pip install --no-cache-dir https://github.com/NetEase-FuXi/EETQ/releases/download/v1.0.0/EETQ-1.0.0+cu121+torch2.1.2-cp310-cp310-linux_x86_64.whl ``` 또는 소스 코드 https://github.com/NetEase-FuXi/EETQ 에서 설치할 수 있습니다. EETQ는 CUDA 기능이 8.9 이하이고 7.0 이상이어야 합니다. ``` git clone https://github.com/NetEase-FuXi/EETQ.git cd EETQ/ git submodule update --init --recursive pip install . ``` 비양자화 모델은 "from_pretrained"를 통해 양자화할 수 있습니다. ```py from transformers import AutoModelForCausalLM, EetqConfig path = "/path/to/model" quantization_config = EetqConfig("int8") model = AutoModelForCausalLM.from_pretrained(path, device_map="auto", quantization_config=quantization_config) ``` 양자화된 모델은 "save_pretrained"를 통해 저장할 수 있으며, "from_pretrained"를 통해 다시 사용할 수 있습니다. ```py quant_path = "/path/to/save/quantized/model" model.save_pretrained(quant_path) model = AutoModelForCausalLM.from_pretrained(quant_path, device_map="auto") ```
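양자화된 모델은 일반 모델과 동일한 방식으로 사용할 수 있습니다. 아래는 위에서 다시 불러온 모델로 텍스트를 생성하는 간단한 예시입니다 (경로는 위와 마찬가지로 자리 표시자입니다).

```py
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(quant_path)
inputs = tokenizer("The capital of France is", return_tensors="pt").to(model.device)

outputs = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```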
transformers/docs/source/ko/quantization/eetq.md/0
{ "file_path": "transformers/docs/source/ko/quantization/eetq.md", "repo_id": "transformers", "token_count": 1163 }
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 컴퓨터 비전을 위한 지식 증류[[Knowledge-Distillation-for-Computer-Vision]] [[open-in-colab]] 지식 증류(Knowledge distillation)는 더 크고 복잡한 모델(교사)에서 더 작고 간단한 모델(학생)로 지식을 전달하는 기술입니다. 한 모델에서 다른 모델로 지식을 증류하기 위해, 특정 작업(이 경우 이미지 분류)에 대해 학습된 사전 훈련된 교사 모델을 사용하고, 랜덤으로 초기화된 학생 모델을 이미지 분류 작업에 대해 학습합니다. 그다음, 학생 모델이 교사 모델의 출력을 모방하여 두 모델의 출력 차이를 최소화하도록 훈련합니다. 이 기법은 Hinton 등 연구진의 [Distilling the Knowledge in a Neural Network](https://arxiv.org/abs/1503.02531)에서 처음 소개되었습니다. 이 가이드에서는 특정 작업에 맞춘 지식 증류를 수행할 것입니다. 이번에는 [beans dataset](https://huggingface.co/datasets/beans)을 사용할 것입니다. 이 가이드는 [미세 조정된 ViT 모델](https://huggingface.co/merve/vit-mobilenet-beans-224) (교사 모델)을 [MobileNet](https://huggingface.co/google/mobilenet_v2_1.4_224) (학생 모델)으로 증류하는 방법을 🤗 Transformers의 [Trainer API](https://huggingface.co/docs/transformers/en/main_classes/trainer#trainer) 를 사용하여 보여줍니다. 증류와 과정 평가를 위해 필요한 라이브러리를 설치해 봅시다. ```bash pip install transformers datasets accelerate tensorboard evaluate --upgrade ``` 이 예제에서는 `merve/beans-vit-224` 모델을 교사 모델로 사용하고 있습니다. 이 모델은 beans 데이터셋에서 파인 튜닝된 `google/vit-base-patch16-224-in21k` 기반의 이미지 분류 모델입니다. 이 모델을 무작위로 초기화된 MobileNetV2로 증류해볼 것입니다. 이제 데이터셋을 로드하겠습니다. ```python from datasets import load_dataset dataset = load_dataset("beans") ``` 이 경우 두 모델의 이미지 프로세서가 동일한 해상도로 동일한 출력을 반환하기 때문에, 두가지를 모두 사용할 수 있습니다. 데이터셋의 모든 분할마다 전처리를 적용하기 위해 `dataset`의 `map()` 메소드를 사용할 것 입니다. ```python from transformers import AutoImageProcessor teacher_processor = AutoImageProcessor.from_pretrained("merve/beans-vit-224") def process(examples): processed_inputs = teacher_processor(examples["image"]) return processed_inputs processed_datasets = dataset.map(process, batched=True) ``` 학생 모델(무작위로 초기화된 MobileNet)이 교사 모델(파인 튜닝된 비전 트랜스포머)을 모방하도록 할 것 입니다. 이를 위해 먼저 교사와 학생 모델의 로짓 출력값을 구합니다. 그런 다음 각 출력값을 매개변수 `temperature` 값으로 나누는데, 이 매개변수는 각 소프트 타겟의 중요도를 조절하는 역할을 합니다. 매개변수 `lambda` 는 증류 손실의 중요도에 가중치를 줍니다. 이 예제에서는 `temperature=5`와 `lambda=0.5`를 사용할 것입니다. 학생과 교사 간의 발산을 계산하기 위해 Kullback-Leibler Divergence 손실을 사용합니다. 두 데이터 P와 Q가 주어졌을 때, KL Divergence는 Q를 사용하여 P를 표현하는 데 얼만큼의 추가 정보가 필요한지를 말해줍니다. 두 데이터가 동일하다면, KL Divergence는 0이며, Q로 P를 설명하는 데 추가 정보가 필요하지 않음을 의미합니다. 따라서 지식 증류의 맥락에서 KL Divergence는 유용합니다. 
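다음 코드로 넘어가기 전에, 이 예제에서 최소화할 전체 손실을 수식으로 정리하면 다음과 같습니다. 여기서 `z_t`와 `z_s`는 교사와 학생의 로짓, `T`는 `temperature`, `λ`는 `lambda_param`, 그리고 첫 번째 항은 학생 모델이 실제 레이블에 대해 계산하는 교차 엔트로피 손실입니다.

$$
\mathcal{L} = (1 - \lambda)\,\mathcal{L}_{CE} + \lambda\, T^{2}\, D_{KL}\!\left(\mathrm{softmax}\!\left(\tfrac{z_t}{T}\right) \,\Big\|\, \mathrm{softmax}\!\left(\tfrac{z_s}{T}\right)\right)
$$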
```python from transformers import TrainingArguments, Trainer import torch import torch.nn as nn import torch.nn.functional as F class ImageDistilTrainer(Trainer): def __init__(self, teacher_model=None, student_model=None, temperature=None, lambda_param=None, *args, **kwargs): super().__init__(model=student_model, *args, **kwargs) self.teacher = teacher_model self.student = student_model self.loss_function = nn.KLDivLoss(reduction="batchmean") device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') self.teacher.to(device) self.teacher.eval() self.temperature = temperature self.lambda_param = lambda_param def compute_loss(self, student, inputs, return_outputs=False): student_output = self.student(**inputs) with torch.no_grad(): teacher_output = self.teacher(**inputs) # 교사와 학생의 소프트 타겟(soft targets) 계산 soft_teacher = F.softmax(teacher_output.logits / self.temperature, dim=-1) soft_student = F.log_softmax(student_output.logits / self.temperature, dim=-1) # 손실(loss) 계산 distillation_loss = self.loss_function(soft_student, soft_teacher) * (self.temperature ** 2) # 실제 레이블 손실 계산 student_target_loss = student_output.loss # 최종 손실 계산 loss = (1. - self.lambda_param) * student_target_loss + self.lambda_param * distillation_loss return (loss, student_output) if return_outputs else loss ``` 이제 Hugging Face Hub에 로그인하여 `Trainer`를 통해 Hugging Face Hub에 모델을 푸시할 수 있도록 하겠습니다. ```python from huggingface_hub import notebook_login notebook_login() ``` 이제 `TrainingArguments`, 교사 모델과 학생 모델을 설정하겠습니다. ```python from transformers import AutoModelForImageClassification, MobileNetV2Config, MobileNetV2ForImageClassification training_args = TrainingArguments( output_dir="my-awesome-model", num_train_epochs=30, fp16=True, logging_dir=f"{repo_name}/logs", logging_strategy="epoch", eval_strategy="epoch", save_strategy="epoch", load_best_model_at_end=True, metric_for_best_model="accuracy", report_to="tensorboard", push_to_hub=True, hub_strategy="every_save", hub_model_id=repo_name, ) num_labels = len(processed_datasets["train"].features["labels"].names) # 모델 초기화 teacher_model = AutoModelForImageClassification.from_pretrained( "merve/beans-vit-224", num_labels=num_labels, ignore_mismatched_sizes=True ) # MobileNetV2 밑바닥부터 학습 student_config = MobileNetV2Config() student_config.num_labels = num_labels student_model = MobileNetV2ForImageClassification(student_config) ``` `compute_metrics` 함수를 사용하여 테스트 세트에서 모델을 평가할 수 있습니다. 이 함수는 훈련 과정에서 모델의 `accuracy`와 `f1`을 계산하는 데 사용됩니다. ```python import evaluate import numpy as np accuracy = evaluate.load("accuracy") def compute_metrics(eval_pred): predictions, labels = eval_pred acc = accuracy.compute(references=labels, predictions=np.argmax(predictions, axis=1)) return {"accuracy": acc["accuracy"]} ``` 정의한 훈련 인수로 `Trainer`를 초기화해봅시다. 또한 데이터 콜레이터(data collator)를 초기화하겠습니다. ```python from transformers import DefaultDataCollator data_collator = DefaultDataCollator() trainer = ImageDistilTrainer( student_model=student_model, teacher_model=teacher_model, training_args=training_args, train_dataset=processed_datasets["train"], eval_dataset=processed_datasets["validation"], data_collator=data_collator, tokenizer=teacher_processor, compute_metrics=compute_metrics, temperature=5, lambda_param=0.5 ) ``` 이제 모델을 훈련할 수 있습니다. ```python trainer.train() ``` 모델을 테스트 세트에서 평가할 수 있습니다. ```python trainer.evaluate(processed_datasets["test"]) ``` 테스트 세트에서 모델의 정확도는 72%에 도달했습니다. 증류의 효율성을 검증하기 위해 동일한 하이퍼파라미터로 beans 데이터셋에서 MobileNet을 처음부터 훈련하였고, 테스트 세트에서의 정확도는 63% 였습니다. 
다양한 사전 훈련된 교사 모델, 학생 구조, 증류 매개변수를 시도해보시고 결과를 보고하기를 권장합니다. 증류된 모델의 훈련 로그와 체크포인트는 [이 저장소](https://huggingface.co/merve/vit-mobilenet-beans-224)에서 찾을 수 있으며, 처음부터 훈련된 MobileNetV2는 이 [저장소](https://huggingface.co/merve/resnet-mobilenet-beans-5)에서 찾을 수 있습니다.
transformers/docs/source/ko/tasks/knowledge_distillation_for_image_classification.md/0
{ "file_path": "transformers/docs/source/ko/tasks/knowledge_distillation_for_image_classification.md", "repo_id": "transformers", "token_count": 5192 }
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 제로샷(zero-shot) 이미지 분류[[zeroshot-image-classification]] [[open-in-colab]] 제로샷(zero-shot) 이미지 분류는 특정 카테고리의 예시가 포함된 데이터를 학습되지 않은 모델을 사용해 이미지 분류를 수행하는 작업입니다. 일반적으로 이미지 분류를 위해서는 레이블이 달린 특정 이미지 데이터로 모델 학습이 필요하며, 이 모델은 특정 이미지의 특징을 레이블에 "매핑"하는 방법을 학습합니다. 새로운 레이블이 있는 분류 작업에 이러한 모델을 사용해야 하는 경우에는, 모델을 "재보정"하기 위해 미세 조정이 필요합니다. 이와 대조적으로, 제로샷 또는 개방형 어휘(open vocabulary) 이미지 분류 모델은 일반적으로 대규모 이미지 데이터와 해당 설명에 대해 학습된 멀티모달(multimodal) 모델입니다. 이러한 모델은 제로샷 이미지 분류를 포함한 많은 다운스트림 작업에 사용할 수 있는 정렬된(aligned) 비전 언어 표현을 학습합니다. 이는 이미지 분류에 대한 보다 유연한 접근 방식으로, 추가 학습 데이터 없이 새로운 레이블이나 학습하지 못한 카테고리에 대해 모델을 일반화할 수 있습니다. 또한, 사용자가 대상 개체에 대한 자유 형식의 텍스트 설명으로 이미지를 검색할 수 있습니다. 이번 가이드에서 배울 내용은 다음과 같습니다: * 제로샷 이미지 분류 파이프라인 만들기 * 직접 제로샷 이미지 분류 모델 추론 실행하기 시작하기 전에 필요한 라이브러리가 모두 설치되어 있는지 확인하세요: ```bash pip install -q transformers ``` ## 제로샷(zero-shot) 이미지 분류 파이프라인[[zeroshot-image-classification-pipeline]] [`pipeline`]을 활용하면 가장 간단하게 제로샷 이미지 분류를 지원하는 모델로 추론해볼 수 있습니다. [Hugging Face Hub에 업로드된 체크포인트](https://huggingface.co/models?pipeline_tag=zero-shot-image-classification&sort=downloads)에서 파이프라인을 인스턴스화합니다. ```python >>> from transformers import pipeline >>> checkpoint = "openai/clip-vit-large-patch14" >>> detector = pipeline(model=checkpoint, task="zero-shot-image-classification") ``` 다음으로, 분류하고 싶은 이미지를 선택하세요. ```py >>> from PIL import Image >>> import requests >>> url = "https://unsplash.com/photos/g8oS8-82DxI/download?ixid=MnwxMjA3fDB8MXx0b3BpY3x8SnBnNktpZGwtSGt8fHx8fDJ8fDE2NzgxMDYwODc&force=true&w=640" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/owl.jpg" alt="Photo of an owl"/> </div> 이미지와 해당 이미지의 후보 레이블인 `candidate_labels`를 파이프라인으로 전달합니다. 여기서는 이미지를 직접 전달하지만, 컴퓨터에 저장된 이미지의 경로나 url로 전달할 수도 있습니다. `candidate_labels`는 이 예시처럼 간단한 단어일 수도 있고 좀 더 설명적인 단어일 수도 있습니다. ```py >>> predictions = classifier(image, candidate_labels=["fox", "bear", "seagull", "owl"]) >>> predictions [{'score': 0.9996670484542847, 'label': 'owl'}, {'score': 0.000199399160919711, 'label': 'seagull'}, {'score': 7.392891711788252e-05, 'label': 'fox'}, {'score': 5.96074532950297e-05, 'label': 'bear'}] ``` ## 직접 제로샷(zero-shot) 이미지 분류하기[[zeroshot-image-classification-by-hand]] 이제 제로샷 이미지 분류 파이프라인 사용 방법을 살펴보았으니, 실행하는 방법을 살펴보겠습니다. [Hugging Face Hub에 업로드된 체크포인트](https://huggingface.co/models?pipeline_tag=zero-shot-image-classification&sort=downloads)에서 모델과 프로세서를 가져오는 것으로 시작합니다. 여기서는 이전과 동일한 체크포인트를 사용하겠습니다: ```py >>> from transformers import AutoProcessor, AutoModelForZeroShotImageClassification >>> model = AutoModelForZeroShotImageClassification.from_pretrained(checkpoint) >>> processor = AutoProcessor.from_pretrained(checkpoint) ``` 다른 이미지를 사용해 보겠습니다. 
```py >>> from PIL import Image >>> import requests >>> url = "https://unsplash.com/photos/xBRQfR2bqNI/download?ixid=MnwxMjA3fDB8MXxhbGx8fHx8fHx8fHwxNjc4Mzg4ODEx&force=true&w=640" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg" alt="Photo of a car"/> </div> 프로세서를 사용해 모델의 입력을 준비합니다. 프로세서는 모델의 입력으로 사용하기 위해 이미지 크기를 변환하고 정규화하는 이미지 프로세서와 텍스트 입력을 처리하는 토크나이저로 구성됩니다. ```py >>> candidate_labels = ["tree", "car", "bike", "cat"] >>> inputs = processor(images=image, text=candidate_labels, return_tensors="pt", padding=True) ``` 모델에 입력을 전달하고, 결과를 후처리합니다: ```py >>> import torch >>> with torch.no_grad(): ... outputs = model(**inputs) >>> logits = outputs.logits_per_image[0] >>> probs = logits.softmax(dim=-1).numpy() >>> scores = probs.tolist() >>> result = [ ... {"score": score, "label": candidate_label} ... for score, candidate_label in sorted(zip(probs, candidate_labels), key=lambda x: -x[0]) ... ] >>> result [{'score': 0.998572, 'label': 'car'}, {'score': 0.0010570387, 'label': 'bike'}, {'score': 0.0003393686, 'label': 'tree'}, {'score': 3.1572064e-05, 'label': 'cat'}] ```
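앞서 언급했듯이 후보 레이블은 더 설명적인 문구로 바꿀 수 있습니다. 파이프라인을 사용할 때는 `hypothesis_template` 인자로 레이블을 감싸는 문장 형식을 지정할 수도 있습니다. 아래는 간단한 예시입니다 (템플릿 문구는 예시로 가정한 것입니다).

```py
>>> detector = pipeline(model=checkpoint, task="zero-shot-image-classification")
>>> detector(image, candidate_labels=["tree", "car", "bike", "cat"], hypothesis_template="a photo of a {}")
```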
transformers/docs/source/ko/tasks/zero_shot_image_classification.md/0
{ "file_path": "transformers/docs/source/ko/tasks/zero_shot_image_classification.md", "repo_id": "transformers", "token_count": 3889 }
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Treinamento distribuído com o 🤗 Accelerate O paralelismo surgiu como uma estratégia para treinar modelos grandes em hardware limitado e aumentar a velocidade de treinamento em várias órdens de magnitude. Na Hugging Face criamos a biblioteca [🤗 Accelerate](https://huggingface.co/docs/accelerate) para ajudar os usuários a treinar modelos 🤗 Transformers com qualquer configuração distribuída, seja em uma máquina com múltiplos GPUs ou em múltiplos GPUs distribuidos entre muitas máquinas. Neste tutorial, você irá aprender como personalizar seu laço de treinamento de PyTorch para poder treinar em ambientes distribuídos. ## Configuração De início, instale o 🤗 Accelerate: ```bash pip install accelerate ``` Logo, devemos importar e criar um objeto [`Accelerator`](https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator). O `Accelerator` detectará automáticamente a configuração distribuída disponível e inicializará todos os componentes necessários para o treinamento. Não há necessidade portanto de especificar o dispositivo onde deve colocar seu modelo. ```py >>> from accelerate import Accelerator >>> accelerator = Accelerator() ``` ## Preparando a aceleração Passe todos os objetos relevantes ao treinamento para o método [`prepare`](https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.prepare). Isto inclui os DataLoaders de treino e evaluação, um modelo e um otimizador: ```py >>> train_dataloader, eval_dataloader, model, optimizer = accelerator.prepare( ... train_dataloader, eval_dataloader, model, optimizer ... ) ``` ## Backward Por último, substitua o `loss.backward()` padrão em seu laço de treinamento com o método [`backward`](https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.backward) do 🤗 Accelerate: ```py >>> for epoch in range(num_epochs): ... for batch in train_dataloader: ... outputs = model(**batch) ... loss = outputs.loss ... accelerator.backward(loss) ... optimizer.step() ... lr_scheduler.step() ... optimizer.zero_grad() ... progress_bar.update(1) ``` Como se poder ver no seguinte código, só precisará adicionar quatro linhas de código ao seu laço de treinamento para habilitar o treinamento distribuído! 
```diff + from accelerate import Accelerator from transformers import AdamW, AutoModelForSequenceClassification, get_scheduler + accelerator = Accelerator() model = AutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=2) optimizer = AdamW(model.parameters(), lr=3e-5) - device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") - model.to(device) + train_dataloader, eval_dataloader, model, optimizer = accelerator.prepare( + train_dataloader, eval_dataloader, model, optimizer + ) num_epochs = 3 num_training_steps = num_epochs * len(train_dataloader) lr_scheduler = get_scheduler( "linear", optimizer=optimizer, num_warmup_steps=0, num_training_steps=num_training_steps ) progress_bar = tqdm(range(num_training_steps)) model.train() for epoch in range(num_epochs): for batch in train_dataloader: - batch = {k: v.to(device) for k, v in batch.items()} outputs = model(**batch) loss = outputs.loss - loss.backward() + accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() progress_bar.update(1) ``` ## Treinamento Quando tiver adicionado as linhas de código relevantes, inicie o treinamento por um script ou notebook como o Colab. ### Treinamento em um Script Se estiver rodando seu treinamento em um Script, execute o seguinte comando para criar e guardar um arquivo de configuração: ```bash accelerate config ``` Comece o treinamento com: ```bash accelerate launch train.py ``` ### Treinamento em um Notebook O 🤗 Accelerate pode rodar em um notebook, por exemplo, se estiver planejando usar as TPUs do Google Colab. Encapsule o código responsável pelo treinamento de uma função e passe-o ao `notebook_launcher`: ```py >>> from accelerate import notebook_launcher >>> notebook_launcher(training_function) ``` Para obter mais informações sobre o 🤗 Accelerate e suas numerosas funções, consulte a [documentación](https://huggingface.co/docs/accelerate/index).
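Depois do treinamento distribuído, pode ser útil salvar o modelo uma única vez, a partir do processo principal. O trecho abaixo é um esboço mínimo usando as utilidades do 🤗 Accelerate (o caminho de saída é apenas um exemplo):

```py
>>> accelerator.wait_for_everyone()
>>> unwrapped_model = accelerator.unwrap_model(model)
>>> unwrapped_model.save_pretrained(
...     "caminho/para/o/modelo",
...     is_main_process=accelerator.is_main_process,
...     save_function=accelerator.save,
... )
```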
transformers/docs/source/pt/accelerate.md/0
{ "file_path": "transformers/docs/source/pt/accelerate.md", "repo_id": "transformers", "token_count": 1904 }
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 创建自定义架构 [`AutoClass`](model_doc/auto) 自动推断模型架构并下载预训练的配置和权重。一般来说,我们建议使用 `AutoClass` 生成与检查点(checkpoint)无关的代码。希望对特定模型参数有更多控制的用户,可以仅从几个基类创建自定义的 🤗 Transformers 模型。这对于任何有兴趣学习、训练或试验 🤗 Transformers 模型的人可能特别有用。通过本指南,深入了解如何不通过 `AutoClass` 创建自定义模型。了解如何: - 加载并自定义模型配置。 - 创建模型架构。 - 为文本创建慢速和快速分词器。 - 为视觉任务创建图像处理器。 - 为音频任务创建特征提取器。 - 为多模态任务创建处理器。 ## 配置 [配置](main_classes/configuration) 涉及到模型的具体属性。每个模型配置都有不同的属性;例如,所有 NLP 模型都共享 `hidden_size`、`num_attention_heads`、 `num_hidden_layers` 和 `vocab_size` 属性。这些属性用于指定构建模型时的注意力头数量或隐藏层层数。 访问 [`DistilBertConfig`] 以更近一步了解 [DistilBERT](model_doc/distilbert),检查它的属性: ```py >>> from transformers import DistilBertConfig >>> config = DistilBertConfig() >>> print(config) DistilBertConfig { "activation": "gelu", "attention_dropout": 0.1, "dim": 768, "dropout": 0.1, "hidden_dim": 3072, "initializer_range": 0.02, "max_position_embeddings": 512, "model_type": "distilbert", "n_heads": 12, "n_layers": 6, "pad_token_id": 0, "qa_dropout": 0.1, "seq_classif_dropout": 0.2, "sinusoidal_pos_embds": false, "transformers_version": "4.16.2", "vocab_size": 30522 } ``` [`DistilBertConfig`] 显示了构建基础 [`DistilBertModel`] 所使用的所有默认属性。所有属性都可以进行自定义,为实验创造了空间。例如,您可以将默认模型自定义为: - 使用 `activation` 参数尝试不同的激活函数。 - 使用 `attention_dropout` 参数为 attention probabilities 使用更高的 dropout ratio。 ```py >>> my_config = DistilBertConfig(activation="relu", attention_dropout=0.4) >>> print(my_config) DistilBertConfig { "activation": "relu", "attention_dropout": 0.4, "dim": 768, "dropout": 0.1, "hidden_dim": 3072, "initializer_range": 0.02, "max_position_embeddings": 512, "model_type": "distilbert", "n_heads": 12, "n_layers": 6, "pad_token_id": 0, "qa_dropout": 0.1, "seq_classif_dropout": 0.2, "sinusoidal_pos_embds": false, "transformers_version": "4.16.2", "vocab_size": 30522 } ``` 预训练模型的属性可以在 [`~PretrainedConfig.from_pretrained`] 函数中进行修改: ```py >>> my_config = DistilBertConfig.from_pretrained("distilbert/distilbert-base-uncased", activation="relu", attention_dropout=0.4) ``` 当你对模型配置满意时,可以使用 [`~PretrainedConfig.save_pretrained`] 来保存配置。你的配置文件将以 JSON 文件的形式存储在指定的保存目录中: ```py >>> my_config.save_pretrained(save_directory="./your_model_save_path") ``` 要重用配置文件,请使用 [`~PretrainedConfig.from_pretrained`] 进行加载: ```py >>> my_config = DistilBertConfig.from_pretrained("./your_model_save_path/config.json") ``` <Tip> 你还可以将配置文件保存为字典,甚至只保存自定义配置属性与默认配置属性之间的差异!有关更多详细信息,请参阅 [配置](main_classes/configuration) 文档。 </Tip> ## 模型 接下来,创建一个[模型](main_classes/models)。模型,也可泛指架构,定义了每一层网络的行为以及进行的操作。配置中的 `num_hidden_layers` 等属性用于定义架构。每个模型都共享基类 [`PreTrainedModel`] 和一些常用方法,例如调整输入嵌入的大小和修剪自注意力头。此外,所有模型都是 [`torch.nn.Module`](https://pytorch.org/docs/stable/generated/torch.nn.Module.html)、[`tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model) 或 
[`flax.linen.Module`](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) 的子类。这意味着模型与各自框架的用法兼容。 <frameworkcontent> <pt> 将自定义配置属性加载到模型中: ```py >>> from transformers import DistilBertModel >>> my_config = DistilBertConfig.from_pretrained("./your_model_save_path/config.json") >>> model = DistilBertModel(my_config) ``` 这段代码创建了一个具有随机参数而不是预训练权重的模型。在训练该模型之前,您还无法将该模型用于任何用途。训练是一项昂贵且耗时的过程。通常来说,最好使用预训练模型来更快地获得更好的结果,同时仅使用训练所需资源的一小部分。 使用 [`~PreTrainedModel.from_pretrained`] 创建预训练模型: ```py >>> model = DistilBertModel.from_pretrained("distilbert/distilbert-base-uncased") ``` 当加载预训练权重时,如果模型是由 🤗 Transformers 提供的,将自动加载默认模型配置。然而,如果你愿意,仍然可以将默认模型配置的某些或者所有属性替换成你自己的配置: ```py >>> model = DistilBertModel.from_pretrained("distilbert/distilbert-base-uncased", config=my_config) ``` </pt> <tf> 将自定义配置属性加载到模型中: ```py >>> from transformers import TFDistilBertModel >>> my_config = DistilBertConfig.from_pretrained("./your_model_save_path/my_config.json") >>> tf_model = TFDistilBertModel(my_config) ``` 这段代码创建了一个具有随机参数而不是预训练权重的模型。在训练该模型之前,您还无法将该模型用于任何用途。训练是一项昂贵且耗时的过程。通常来说,最好使用预训练模型来更快地获得更好的结果,同时仅使用训练所需资源的一小部分。 使用 [`~TFPreTrainedModel.from_pretrained`] 创建预训练模型: ```py >>> tf_model = TFDistilBertModel.from_pretrained("distilbert/distilbert-base-uncased") ``` 当加载预训练权重时,如果模型是由 🤗 Transformers 提供的,将自动加载默认模型配置。然而,如果你愿意,仍然可以将默认模型配置的某些或者所有属性替换成自己的配置: ```py >>> tf_model = TFDistilBertModel.from_pretrained("distilbert/distilbert-base-uncased", config=my_config) ``` </tf> </frameworkcontent> ### 模型头(Model heads) 此时,你已经有了一个输出*隐藏状态*的基础 DistilBERT 模型。隐藏状态作为输入传递到模型头以生成最终输出。🤗 Transformers 为每个任务提供不同的模型头,只要模型支持该任务(即,您不能使用 DistilBERT 来执行像翻译这样的序列到序列任务)。 <frameworkcontent> <pt> 例如,[`DistilBertForSequenceClassification`] 是一个带有序列分类头(sequence classification head)的基础 DistilBERT 模型。序列分类头是池化输出之上的线性层。 ```py >>> from transformers import DistilBertForSequenceClassification >>> model = DistilBertForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased") ``` 通过切换到不同的模型头,可以轻松地将此检查点重复用于其他任务。对于问答任务,你可以使用 [`DistilBertForQuestionAnswering`] 模型头。问答头(question answering head)与序列分类头类似,不同点在于它是隐藏状态输出之上的线性层。 ```py >>> from transformers import DistilBertForQuestionAnswering >>> model = DistilBertForQuestionAnswering.from_pretrained("distilbert/distilbert-base-uncased") ``` </pt> <tf> 例如,[`TFDistilBertForSequenceClassification`] 是一个带有序列分类头(sequence classification head)的基础 DistilBERT 模型。序列分类头是池化输出之上的线性层。 ```py >>> from transformers import TFDistilBertForSequenceClassification >>> tf_model = TFDistilBertForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased") ``` 通过切换到不同的模型头,可以轻松地将此检查点重复用于其他任务。对于问答任务,你可以使用 [`TFDistilBertForQuestionAnswering`] 模型头。问答头(question answering head)与序列分类头类似,不同点在于它是隐藏状态输出之上的线性层。 ```py >>> from transformers import TFDistilBertForQuestionAnswering >>> tf_model = TFDistilBertForQuestionAnswering.from_pretrained("distilbert/distilbert-base-uncased") ``` </tf> </frameworkcontent> ## 分词器 在将模型用于文本数据之前,你需要的最后一个基类是 [tokenizer](main_classes/tokenizer),它用于将原始文本转换为张量。🤗 Transformers 支持两种类型的分词器: - [`PreTrainedTokenizer`]:分词器的Python实现 - [`PreTrainedTokenizerFast`]:来自我们基于 Rust 的 [🤗 Tokenizer](https://huggingface.co/docs/tokenizers/python/latest/) 库的分词器。因为其使用了 Rust 实现,这种分词器类型的速度要快得多,尤其是在批量分词(batch tokenization)的时候。快速分词器还提供其他的方法,例如*偏移映射(offset mapping)*,它将标记(token)映射到其原始单词或字符。 这两种分词器都支持常用的方法,如编码和解码、添加新标记以及管理特殊标记。 <Tip warning={true}> 并非每个模型都支持快速分词器。参照这张 [表格](index#supported-frameworks) 查看模型是否支持快速分词器。 </Tip> 如果您训练了自己的分词器,则可以从*词表*文件创建一个分词器: ```py >>> from transformers import DistilBertTokenizer >>> 
my_tokenizer = DistilBertTokenizer(vocab_file="my_vocab_file.txt", do_lower_case=False, padding_side="left") ``` 请务必记住,自定义分词器生成的词表与预训练模型分词器生成的词表是不同的。如果使用预训练模型,则需要使用预训练模型的词表,否则输入将没有意义。 使用 [`DistilBertTokenizer`] 类创建具有预训练模型词表的分词器: ```py >>> from transformers import DistilBertTokenizer >>> slow_tokenizer = DistilBertTokenizer.from_pretrained("distilbert/distilbert-base-uncased") ``` 使用 [`DistilBertTokenizerFast`] 类创建快速分词器: ```py >>> from transformers import DistilBertTokenizerFast >>> fast_tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert/distilbert-base-uncased") ``` <Tip> 默认情况下,[`AutoTokenizer`] 将尝试加载快速标记生成器。你可以通过在 `from_pretrained` 中设置 `use_fast=False` 以禁用此行为。 </Tip> ## 图像处理器 图像处理器用于处理视觉输入。它继承自 [`~image_processing_utils.ImageProcessingMixin`] 基类。 要使用它,需要创建一个与你使用的模型关联的图像处理器。例如,如果你使用 [ViT](model_doc/vit) 进行图像分类,可以创建一个默认的 [`ViTImageProcessor`]: ```py >>> from transformers import ViTImageProcessor >>> vit_extractor = ViTImageProcessor() >>> print(vit_extractor) ViTImageProcessor { "do_normalize": true, "do_resize": true, "image_processor_type": "ViTImageProcessor", "image_mean": [ 0.5, 0.5, 0.5 ], "image_std": [ 0.5, 0.5, 0.5 ], "resample": 2, "size": 224 } ``` <Tip> 如果您不需要进行任何自定义,只需使用 `from_pretrained` 方法加载模型的默认图像处理器参数。 </Tip> 修改任何 [`ViTImageProcessor`] 参数以创建自定义图像处理器: ```py >>> from transformers import ViTImageProcessor >>> my_vit_extractor = ViTImageProcessor(resample="PIL.Image.BOX", do_normalize=False, image_mean=[0.3, 0.3, 0.3]) >>> print(my_vit_extractor) ViTImageProcessor { "do_normalize": false, "do_resize": true, "image_processor_type": "ViTImageProcessor", "image_mean": [ 0.3, 0.3, 0.3 ], "image_std": [ 0.5, 0.5, 0.5 ], "resample": "PIL.Image.BOX", "size": 224 } ``` ## 特征提取器 特征提取器用于处理音频输入。它继承自 [`~feature_extraction_utils.FeatureExtractionMixin`] 基类,亦可继承 [`SequenceFeatureExtractor`] 类来处理音频输入。 要使用它,创建一个与你使用的模型关联的特征提取器。例如,如果你使用 [Wav2Vec2](model_doc/wav2vec2) 进行音频分类,可以创建一个默认的 [`Wav2Vec2FeatureExtractor`]: ```py >>> from transformers import Wav2Vec2FeatureExtractor >>> w2v2_extractor = Wav2Vec2FeatureExtractor() >>> print(w2v2_extractor) Wav2Vec2FeatureExtractor { "do_normalize": true, "feature_extractor_type": "Wav2Vec2FeatureExtractor", "feature_size": 1, "padding_side": "right", "padding_value": 0.0, "return_attention_mask": false, "sampling_rate": 16000 } ``` <Tip> 如果您不需要进行任何自定义,只需使用 `from_pretrained` 方法加载模型的默认特征提取器参数。 </Tip> 修改任何 [`Wav2Vec2FeatureExtractor`] 参数以创建自定义特征提取器: ```py >>> from transformers import Wav2Vec2FeatureExtractor >>> w2v2_extractor = Wav2Vec2FeatureExtractor(sampling_rate=8000, do_normalize=False) >>> print(w2v2_extractor) Wav2Vec2FeatureExtractor { "do_normalize": false, "feature_extractor_type": "Wav2Vec2FeatureExtractor", "feature_size": 1, "padding_side": "right", "padding_value": 0.0, "return_attention_mask": false, "sampling_rate": 8000 } ``` ## 处理器 对于支持多模式任务的模型,🤗 Transformers 提供了一个处理器类,可以方便地将特征提取器和分词器等处理类包装到单个对象中。例如,让我们使用 [`Wav2Vec2Processor`] 来执行自动语音识别任务 (ASR)。 ASR 将音频转录为文本,因此您将需要一个特征提取器和一个分词器。 创建一个特征提取器来处理音频输入: ```py >>> from transformers import Wav2Vec2FeatureExtractor >>> feature_extractor = Wav2Vec2FeatureExtractor(padding_value=1.0, do_normalize=True) ``` 创建一个分词器来处理文本输入: ```py >>> from transformers import Wav2Vec2CTCTokenizer >>> tokenizer = Wav2Vec2CTCTokenizer(vocab_file="my_vocab_file.txt") ``` 将特征提取器和分词器合并到 [`Wav2Vec2Processor`] 中: ```py >>> from transformers import Wav2Vec2Processor >>> processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer) ``` 通过两个基类 - 配置类和模型类 - 以及一个附加的预处理类(分词器、图像处理器、特征提取器或处理器),你可以创建 
🤗 Transformers 支持的任何模型。 每个基类都是可配置的,允许你使用所需的特定属性。 你可以轻松设置模型进行训练或修改现有的预训练模型进行微调。
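下面给出一个把上述组件串联起来的最小示例(仅作演示):用自定义配置实例化一个随机初始化的 DistilBERT 序列分类模型,复用预训练分词器对输入进行编码,再执行一次前向传播。其中 `n_layers=4`、`n_heads=4` 等取值只是示意,可按需调整;由于模型未经训练,输出本身没有实际意义。

```py
>>> import torch
>>> from transformers import DistilBertConfig, DistilBertForSequenceClassification, DistilBertTokenizerFast

>>> # 自定义配置(取值仅作示意)
>>> my_config = DistilBertConfig(n_layers=4, n_heads=4)

>>> # 使用该配置创建一个随机初始化的模型
>>> model = DistilBertForSequenceClassification(my_config)

>>> # 复用预训练分词器,保证词表与模型配置一致
>>> tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert/distilbert-base-uncased")

>>> inputs = tokenizer("Hello, world!", return_tensors="pt")
>>> with torch.no_grad():
...     outputs = model(**inputs)
>>> outputs.logits.shape
torch.Size([1, 2])
```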
transformers/docs/source/zh/create_a_model.md/0
{ "file_path": "transformers/docs/source/zh/create_a_model.md", "repo_id": "transformers", "token_count": 8520 }
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Pipelines pipelines是使用模型进行推理的一种简单方法。这些pipelines是抽象了库中大部分复杂代码的对象,提供了一个专用于多个任务的简单API,包括专名识别、掩码语言建模、情感分析、特征提取和问答等。请参阅[任务摘要](../task_summary)以获取使用示例。 有两种pipelines抽象类需要注意: - [`pipeline`],它是封装所有其他pipelines的最强大的对象。 - 针对特定任务pipelines,适用于[音频](#audio)、[计算机视觉](#computer-vision)、[自然语言处理](#natural-language-processing)和[多模态](#multimodal)任务。 ## pipeline抽象类 *pipeline*抽象类是对所有其他可用pipeline的封装。它可以像任何其他pipeline一样实例化,但进一步提供额外的便利性。 简单调用一个项目: ```python >>> pipe = pipeline("text-classification") >>> pipe("This restaurant is awesome") [{'label': 'POSITIVE', 'score': 0.9998743534088135}] ``` 如果您想使用 [hub](https://huggingface.co) 上的特定模型,可以忽略任务,如果hub上的模型已经定义了该任务: ```python >>> pipe = pipeline(model="FacebookAI/roberta-large-mnli") >>> pipe("This restaurant is awesome") [{'label': 'NEUTRAL', 'score': 0.7313136458396912}] ``` 要在多个项目上调用pipeline,可以使用*列表*调用它。 ```python >>> pipe = pipeline("text-classification") >>> pipe(["This restaurant is awesome", "This restaurant is awful"]) [{'label': 'POSITIVE', 'score': 0.9998743534088135}, {'label': 'NEGATIVE', 'score': 0.9996669292449951}] ``` 为了遍历整个数据集,建议直接使用 `dataset`。这意味着您不需要一次性分配整个数据集,也不需要自己进行批处理。这应该与GPU上的自定义循环一样快。如果不是,请随时提出issue。 ```python import datasets from transformers import pipeline from transformers.pipelines.pt_utils import KeyDataset from tqdm.auto import tqdm pipe = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h", device=0) dataset = datasets.load_dataset("superb", name="asr", split="test") # KeyDataset (only *pt*) will simply return the item in the dict returned by the dataset item # as we're not interested in the *target* part of the dataset. For sentence pair use KeyPairDataset for out in tqdm(pipe(KeyDataset(dataset, "file"))): print(out) # {"text": "NUMBER TEN FRESH NELLY IS WAITING ON YOU GOOD NIGHT HUSBAND"} # {"text": ....} # .... ``` 为了方便使用,也可以使用生成器: ```python from transformers import pipeline pipe = pipeline("text-classification") def data(): while True: # This could come from a dataset, a database, a queue or HTTP request # in a server # Caveat: because this is iterative, you cannot use `num_workers > 1` variable # to use multiple threads to preprocess data. You can still have 1 thread that # does the preprocessing while the main runs the big inference yield "This is a test" for out in pipe(data()): print(out) # {"text": "NUMBER TEN FRESH NELLY IS WAITING ON YOU GOOD NIGHT HUSBAND"} # {"text": ....} # .... 
``` [[autodoc]] pipeline ## Pipeline batching 所有pipeline都可以使用批处理。这将在pipeline使用其流处理功能时起作用(即传递列表或 `Dataset` 或 `generator` 时)。 ```python from transformers import pipeline from transformers.pipelines.pt_utils import KeyDataset import datasets dataset = datasets.load_dataset("imdb", name="plain_text", split="unsupervised") pipe = pipeline("text-classification", device=0) for out in pipe(KeyDataset(dataset, "text"), batch_size=8, truncation="only_first"): print(out) # [{'label': 'POSITIVE', 'score': 0.9998743534088135}] # Exactly the same output as before, but the content are passed # as batches to the model ``` <Tip warning={true}> 然而,这并不自动意味着性能提升。它可能是一个10倍的加速或5倍的减速,具体取决于硬件、数据和实际使用的模型。 主要是加速的示例: </Tip> ```python from transformers import pipeline from torch.utils.data import Dataset from tqdm.auto import tqdm pipe = pipeline("text-classification", device=0) class MyDataset(Dataset): def __len__(self): return 5000 def __getitem__(self, i): return "This is a test" dataset = MyDataset() for batch_size in [1, 8, 64, 256]: print("-" * 30) print(f"Streaming batch_size={batch_size}") for out in tqdm(pipe(dataset, batch_size=batch_size), total=len(dataset)): pass ``` ``` # On GTX 970 ------------------------------ Streaming no batching 100%|██████████████████████████████████████████████████████████████████████| 5000/5000 [00:26<00:00, 187.52it/s] ------------------------------ Streaming batch_size=8 100%|█████████████████████████████████████████████████████████████████████| 5000/5000 [00:04<00:00, 1205.95it/s] ------------------------------ Streaming batch_size=64 100%|█████████████████████████████████████████████████████████████████████| 5000/5000 [00:02<00:00, 2478.24it/s] ------------------------------ Streaming batch_size=256 100%|█████████████████████████████████████████████████████████████████████| 5000/5000 [00:01<00:00, 2554.43it/s] (diminishing returns, saturated the GPU) ``` 主要是减速的示例: ```python class MyDataset(Dataset): def __len__(self): return 5000 def __getitem__(self, i): if i % 64 == 0: n = 100 else: n = 1 return "This is a test" * n ``` 与其他句子相比,这是一个非常长的句子。在这种情况下,**整个**批次将需要400个tokens的长度,因此整个批次将是 [64, 400] 而不是 [64, 4],从而导致较大的减速。更糟糕的是,在更大的批次上,程序会崩溃。 ``` ------------------------------ Streaming no batching 100%|█████████████████████████████████████████████████████████████████████| 1000/1000 [00:05<00:00, 183.69it/s] ------------------------------ Streaming batch_size=8 100%|█████████████████████████████████████████████████████████████████████| 1000/1000 [00:03<00:00, 265.74it/s] ------------------------------ Streaming batch_size=64 100%|██████████████████████████████████████████████████████████████████████| 1000/1000 [00:26<00:00, 37.80it/s] ------------------------------ Streaming batch_size=256 0%| | 0/1000 [00:00<?, ?it/s] Traceback (most recent call last): File "/home/nicolas/src/transformers/test.py", line 42, in <module> for out in tqdm(pipe(dataset, batch_size=256), total=len(dataset)): .... q = q / math.sqrt(dim_per_head) # (bs, n_heads, q_length, dim_per_head) RuntimeError: CUDA out of memory. 
Tried to allocate 376.00 MiB (GPU 0; 3.95 GiB total capacity; 1.72 GiB already allocated; 354.88 MiB free; 2.46 GiB reserved in total by PyTorch) ``` 对于这个问题,没有好的(通用)解决方案,效果可能因您的用例而异。经验法则如下: 对于用户,一个经验法则是: - **使用硬件测量负载性能。测量、测量、再测量。真实的数字是唯一的方法。** - 如果受到延迟的限制(进行推理的实时产品),不要进行批处理。 - 如果使用CPU,不要进行批处理。 - 如果您在GPU上处理的是吞吐量(您希望在大量静态数据上运行模型),则: - 如果对序列长度的大小没有概念("自然"数据),默认情况下不要进行批处理,进行测试并尝试逐渐添加,添加OOM检查以在失败时恢复(如果您不能控制序列长度,它将在某些时候失败)。 - 如果您的序列长度非常规律,那么批处理更有可能非常有趣,进行测试并推动它,直到出现OOM。 - GPU越大,批处理越有可能变得更有趣 - 一旦启用批处理,确保能够很好地处理OOM。 ## Pipeline chunk batching `zero-shot-classification` 和 `question-answering` 在某种意义上稍微特殊,因为单个输入可能会导致模型的多次前向传递。在正常情况下,这将导致 `batch_size` 参数的问题。 为了规避这个问题,这两个pipeline都有点特殊,它们是 `ChunkPipeline` 而不是常规的 `Pipeline`。简而言之: ```python preprocessed = pipe.preprocess(inputs) model_outputs = pipe.forward(preprocessed) outputs = pipe.postprocess(model_outputs) ``` 现在变成: ```python all_model_outputs = [] for preprocessed in pipe.preprocess(inputs): model_outputs = pipe.forward(preprocessed) all_model_outputs.append(model_outputs) outputs = pipe.postprocess(all_model_outputs) ``` 这对您的代码应该是非常直观的,因为pipeline的使用方式是相同的。 这是一个简化的视图,因为Pipeline可以自动处理批次!这意味着您不必担心您的输入实际上会触发多少次前向传递,您可以独立于输入优化 `batch_size`。前面部分的注意事项仍然适用。 ## Pipeline自定义 如果您想要重载特定的pipeline。 请随时为您手头的任务创建一个issue,Pipeline的目标是易于使用并支持大多数情况,因此 `transformers` 可能支持您的用例。 如果您想简单地尝试一下,可以: - 继承您选择的pipeline ```python class MyPipeline(TextClassificationPipeline): def postprocess(): # Your code goes here scores = scores * 100 # And here my_pipeline = MyPipeline(model=model, tokenizer=tokenizer, ...) # or if you use *pipeline* function, then: my_pipeline = pipeline(model="xxxx", pipeline_class=MyPipeline) ``` 这样就可以让您编写所有想要的自定义代码。 ## 实现一个pipeline [实现一个新的pipeline](../add_new_pipeline) ## 音频 可用于音频任务的pipeline包括以下几种。 ### AudioClassificationPipeline [[autodoc]] AudioClassificationPipeline - __call__ - all ### AutomaticSpeechRecognitionPipeline [[autodoc]] AutomaticSpeechRecognitionPipeline - __call__ - all ### TextToAudioPipeline [[autodoc]] TextToAudioPipeline - __call__ - all ### ZeroShotAudioClassificationPipeline [[autodoc]] ZeroShotAudioClassificationPipeline - __call__ - all ## 计算机视觉 可用于计算机视觉任务的pipeline包括以下几种。 ### DepthEstimationPipeline [[autodoc]] DepthEstimationPipeline - __call__ - all ### ImageClassificationPipeline [[autodoc]] ImageClassificationPipeline - __call__ - all ### ImageSegmentationPipeline [[autodoc]] ImageSegmentationPipeline - __call__ - all ### ImageToImagePipeline [[autodoc]] ImageToImagePipeline - __call__ - all ### ObjectDetectionPipeline [[autodoc]] ObjectDetectionPipeline - __call__ - all ### VideoClassificationPipeline [[autodoc]] VideoClassificationPipeline - __call__ - all ### ZeroShotImageClassificationPipeline [[autodoc]] ZeroShotImageClassificationPipeline - __call__ - all ### ZeroShotObjectDetectionPipeline [[autodoc]] ZeroShotObjectDetectionPipeline - __call__ - all ## 自然语言处理 可用于自然语言处理任务的pipeline包括以下几种。 ### FillMaskPipeline [[autodoc]] FillMaskPipeline - __call__ - all ### NerPipeline [[autodoc]] NerPipeline See [`TokenClassificationPipeline`] for all details. 
### QuestionAnsweringPipeline [[autodoc]] QuestionAnsweringPipeline - __call__ - all ### SummarizationPipeline [[autodoc]] SummarizationPipeline - __call__ - all ### TableQuestionAnsweringPipeline [[autodoc]] TableQuestionAnsweringPipeline - __call__ ### TextClassificationPipeline [[autodoc]] TextClassificationPipeline - __call__ - all ### TextGenerationPipeline [[autodoc]] TextGenerationPipeline - __call__ - all ### Text2TextGenerationPipeline [[autodoc]] Text2TextGenerationPipeline - __call__ - all ### TokenClassificationPipeline [[autodoc]] TokenClassificationPipeline - __call__ - all ### TranslationPipeline [[autodoc]] TranslationPipeline - __call__ - all ### ZeroShotClassificationPipeline [[autodoc]] ZeroShotClassificationPipeline - __call__ - all ## 多模态 可用于多模态任务的pipeline包括以下几种。 ### DocumentQuestionAnsweringPipeline [[autodoc]] DocumentQuestionAnsweringPipeline - __call__ - all ### FeatureExtractionPipeline [[autodoc]] FeatureExtractionPipeline - __call__ - all ### ImageFeatureExtractionPipeline [[autodoc]] ImageFeatureExtractionPipeline - __call__ - all ### ImageToTextPipeline [[autodoc]] ImageToTextPipeline - __call__ - all ### ImageTextToTextPipeline [[autodoc]] ImageTextToTextPipeline - __call__ - all ### MaskGenerationPipeline [[autodoc]] MaskGenerationPipeline - __call__ - all ### VisualQuestionAnsweringPipeline [[autodoc]] VisualQuestionAnsweringPipeline - __call__ - all ## Parent class: `Pipeline` [[autodoc]] Pipeline
transformers/docs/source/zh/main_classes/pipelines.md/0
{ "file_path": "transformers/docs/source/zh/main_classes/pipelines.md", "repo_id": "transformers", "token_count": 6364 }
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 推理pipeline [`pipeline`] 让使用[Hub](https://huggingface.co/models)上的任何模型进行任何语言、计算机视觉、语音以及多模态任务的推理变得非常简单。即使您对特定的模态没有经验,或者不熟悉模型的源码,您仍然可以使用[`pipeline`]进行推理!本教程将教您: - 如何使用[`pipeline`] 进行推理。 - 如何使用特定的`tokenizer`(分词器)或模型。 - 如何使用[`pipeline`] 进行音频、视觉和多模态任务的推理。 <Tip> 请查看[`pipeline`]文档以获取已支持的任务和可用参数的完整列表。 </Tip> ## Pipeline使用 虽然每个任务都有一个关联的[`pipeline`],但使用通用的抽象的[`pipeline`]更加简单,其中包含所有特定任务的`pipelines`。[`pipeline`]会自动加载一个默认模型和一个能够进行任务推理的预处理类。让我们以使用[`pipeline`]进行自动语音识别(ASR)或语音转文本为例。 1. 首先,创建一个[`pipeline`]并指定推理任务: ```py >>> from transformers import pipeline >>> transcriber = pipeline(task="automatic-speech-recognition") ``` 2. 将您的输入传递给[`pipeline`]。对于语音识别,这通常是一个音频输入文件: ```py >>> transcriber("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac") {'text': 'I HAVE A DREAM BUT ONE DAY THIS NATION WILL RISE UP LIVE UP THE TRUE MEANING OF ITS TREES'} ``` 您没有得到您期望的结果?可以在Hub上查看一些[最受欢迎的自动语音识别模型](https://huggingface.co/models?pipeline_tag=automatic-speech-recognition&sort=trending) ,看看是否可以获得更好的转录。 让我们尝试来自 OpenAI 的[Whisper large-v2](https://huggingface.co/openai/whisper-large) 模型。Whisperb比Wav2Vec2晚2年发布,使用接近10倍的数据进行了训练。因此,它在大多数下游基准测试上击败了Wav2Vec2。 它还具有预测标点和大小写的附加优势,而Wav2Vec2则无法实现这些功能。 让我们在这里尝试一下,看看它的表现如何: ```py >>> transcriber = pipeline(model="openai/whisper-large-v2") >>> transcriber("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac") {'text': ' I have a dream that one day this nation will rise up and live out the true meaning of its creed.'} ``` 现在这个结果看起来更准确了!要进行深入的Wav2Vec2与Whisper比较,请参阅[音频变换器课程](https://huggingface.co/learn/audio-course/chapter5/asr_models)。 我们鼓励您在 Hub 上查看不同语言的模型,以及专业领域的模型等。您可以在Hub上直接查看并比较模型的结果,以确定是否适合或处理边缘情况是否比其他模型更好。如果您没有找到适用于您的用例的模型,您始终可以[训练](training)自己的模型! 如果您有多个输入,您可以将输入作为列表传递: ```py transcriber( [ "https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac", "https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/1.flac", ] ) ``` `Pipelines`非常适合用于测试,因为从一个模型切换到另一个模型非常琐碎;但是,还有一些方法可以将它们优化后用于大型工作负载而不仅仅是测试。请查看以下指南,深入探讨如何迭代整个数据集或在Web服务器中使用`Pipelines`: * [在数据集上使用流水线](#using-pipelines-on-a-dataset) * [在Web服务器中使用流水线](./pipeline_webserver) ## 参数 [`pipeline`] 支持许多参数;有些是适用于特定任务的,而有些适用于所有`pipeline`。通常情况下,您可以在任何地方指定对应参数: ```py transcriber = pipeline(model="openai/whisper-large-v2", my_parameter=1) out = transcriber(...) # This will use `my_parameter=1`. out = transcriber(..., my_parameter=2) # This will override and use `my_parameter=2`. out = transcriber(...) # This will go back to using `my_parameter=1`. 
``` 让我们查看其中的三个重要参数: ### 设备 如果您使用 `device=n`,`pipeline`会自动将模型放在指定的设备上。无论您使用PyTorch还是Tensorflow,这都可以工作。 ```py transcriber = pipeline(model="openai/whisper-large-v2", device=0) ``` 如果模型对于单个GPU来说过于庞大,并且您正在使用PyTorch,您可以设置 `device_map="auto"` 以自动确定如何加载和存储模型权重。使用 `device_map` 参数需要安装🤗 [Accelerate](https://huggingface.co/docs/accelerate) 软件包: ```bash pip install --upgrade accelerate ``` 以下代码会自动在各个设备上加载和存储模型权重: ```py transcriber = pipeline(model="openai/whisper-large-v2", device_map="auto") ``` 请注意,如果传递了 `device_map="auto"`,在实例化您的 `pipeline` 时不需要添加 `device=device` 参数,否则可能会遇到一些意外的状况! ### 批量大小 默认情况下,`pipelines`不会进行批量推理,原因在[这里](https://huggingface.co/docs/transformers/main_classes/pipelines#pipeline-batching)详细解释。因为批处理不一定更快,实际上在某些情况下可能会更慢。 但如果在您的用例中起作用,您可以使用: ```py transcriber = pipeline(model="openai/whisper-large-v2", device=0, batch_size=2) audio_filenames = [f"https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/{i}.flac" for i in range(1, 5)] texts = transcriber(audio_filenames) ``` 以上代码会在提供的4个音频文件上运行`pipeline`,它会将它们以2个一组的批次传递给模型(模型在GPU上,此时批处理更有可能有所帮助),而您无需编写额外的代码。输出应始终与没有批处理时收到的结果相一致。它只是一种帮助您更快地使用`pipeline`的方式。 `pipeline`也可以减轻一些批处理的复杂性,因为对于某些`pipeline`,需要将单个项目(如长音频文件)分成多个部分以供模型处理。`pipeline`为您执行这种[*chunk batching*](./main_classes/pipelines#pipeline-chunk-batching)。 ### 任务特定参数 所有任务都提供了特定于任务的参数,这些参数提供额外的灵活性和选择,以帮助您完成工作。 例如,[`transformers.AutomaticSpeechRecognitionPipeline.__call__`] 方法具有一个 `return_timestamps` 参数,对于字幕视频似乎很有帮助: ```py >>> transcriber = pipeline(model="openai/whisper-large-v2", return_timestamps=True) >>> transcriber("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac") {'text': ' I have a dream that one day this nation will rise up and live out the true meaning of its creed.', 'chunks': [{'timestamp': (0.0, 11.88), 'text': ' I have a dream that one day this nation will rise up and live out the true meaning of its'}, {'timestamp': (11.88, 12.38), 'text': ' creed.'}]} ``` 正如您所看到的,模型推断出了文本,还输出了各个句子发音的**时间**。 每个任务都有许多可用的参数,因此请查看每个任务的API参考,以了解您可以进行哪些调整!例如,[`~transformers.AutomaticSpeechRecognitionPipeline`] 具有 `chunk_length_s` 参数,对于处理非常长的音频文件(例如,为整部电影或长达一小时的视频配字幕)非常有帮助,这通常是模型无法单独处理的: ```python >>> transcriber = pipeline(model="openai/whisper-large-v2", chunk_length_s=30, return_timestamps=True) >>> transcriber("https://huggingface.co/datasets/sanchit-gandhi/librispeech_long/resolve/main/audio.wav") {'text': " Chapter 16. I might have told you of the beginning of this liaison in a few lines, but I wanted you to see every step by which we came. I, too, agree to whatever Marguerite wished, Marguerite to be unable to live apart from me. It was the day after the evening... ``` 如果您找不到一个真正有帮助的参数,欢迎[提出请求](https://github.com/huggingface/transformers/issues/new?assignees=&labels=feature&template=feature-request.yml)! ## 在数据集上使用pipelines `pipelines`也可以对大型数据集进行推理。我们建议使用迭代器来完成这一任务,这是最简单的方法: ```py def data(): for i in range(1000): yield f"My example {i}" pipe = pipeline(model="openai-community/gpt2", device=0) generated_characters = 0 for out in pipe(data()): generated_characters += len(out[0]["generated_text"]) ``` 迭代器 `data()` 会产生每个结果,`pipelines`会自动识别输入为可迭代对象,并在GPU上处理数据的同时开始获取数据(在底层使用[DataLoader](https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader))。这一点非常重要,因为您不必为整个数据集分配内存,可以尽可能快地将数据传送到GPU。 由于批处理可以加速处理,因此在这里尝试调整 `batch_size` 参数可能会很有用。 迭代数据集的最简单方法就是从🤗 [Datasets](https://github.com/huggingface/datasets/) 中加载数据集: ```py # KeyDataset is a util that will just output the item we're interested in. 
from transformers.pipelines.pt_utils import KeyDataset from datasets import load_dataset pipe = pipeline(model="hf-internal-testing/tiny-random-wav2vec2", device=0) dataset = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation[:10]") for out in pipe(KeyDataset(dataset, "audio")): print(out) ``` ## 在Web服务器上使用pipelines <Tip> 创建推理引擎是一个复杂的主题,值得有自己的页面。 </Tip> [链接](./pipeline_webserver) ## 视觉流水线 对于视觉任务,使用[`pipeline`] 几乎是相同的。 指定您的任务并将图像传递给分类器。图像可以是链接、本地路径或base64编码的图像。例如,下面显示的是哪种品种的猫? ![pipeline-cat-chonk](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg) ```py >>> from transformers import pipeline >>> vision_classifier = pipeline(model="google/vit-base-patch16-224") >>> preds = vision_classifier( ... images="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg" ... ) >>> preds = [{"score": round(pred["score"], 4), "label": pred["label"]} for pred in preds] >>> preds [{'score': 0.4335, 'label': 'lynx, catamount'}, {'score': 0.0348, 'label': 'cougar, puma, catamount, mountain lion, painter, panther, Felis concolor'}, {'score': 0.0324, 'label': 'snow leopard, ounce, Panthera uncia'}, {'score': 0.0239, 'label': 'Egyptian cat'}, {'score': 0.0229, 'label': 'tiger cat'}] ``` ## 文本流水线 对于NLP任务,使用[`pipeline`] 几乎是相同的。 ```py >>> from transformers import pipeline >>> # This model is a `zero-shot-classification` model. >>> # It will classify text, except you are free to choose any label you might imagine >>> classifier = pipeline(model="facebook/bart-large-mnli") >>> classifier( ... "I have a problem with my iphone that needs to be resolved asap!!", ... candidate_labels=["urgent", "not urgent", "phone", "tablet", "computer"], ... ) {'sequence': 'I have a problem with my iphone that needs to be resolved asap!!', 'labels': ['urgent', 'phone', 'computer', 'not urgent', 'tablet'], 'scores': [0.504, 0.479, 0.013, 0.003, 0.002]} ``` ## 多模态流水线 [`pipeline`] 支持多个模态。例如,视觉问题回答(VQA)任务结合了文本和图像。请随意使用您喜欢的任何图像链接和您想要问关于该图像的问题。图像可以是URL或图像的本地路径。 例如,如果您使用这个[invoice image](https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png): ```py >>> from transformers import pipeline >>> vqa = pipeline(model="impira/layoutlm-document-qa") >>> output = vqa( ... image="https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png", ... question="What is the invoice number?", ... 
) >>> output[0]["score"] = round(output[0]["score"], 3) >>> output [{'score': 0.425, 'answer': 'us-001', 'start': 16, 'end': 16}] ``` <Tip> 要运行上面的示例,除了🤗 Transformers之外,您需要安装[`pytesseract`](https://pypi.org/project/pytesseract/)。 ```bash sudo apt install -y tesseract-ocr pip install pytesseract ``` </Tip> ## 在大模型上使用🤗 `accelerate`和`pipeline`: 您可以轻松地使用🤗 `accelerate`在大模型上运行 `pipeline`!首先确保您已经使用 `pip install accelerate` 安装了 `accelerate`。 首先使用 `device_map="auto"` 加载您的模型!我们将在示例中使用 `facebook/opt-1.3b`。 ```py # pip install accelerate import torch from transformers import pipeline pipe = pipeline(model="facebook/opt-1.3b", torch_dtype=torch.bfloat16, device_map="auto") output = pipe("This is a cool example!", do_sample=True, top_p=0.95) ``` 如果安装 `bitsandbytes` 并添加参数 `load_in_8bit=True`,您还可以传递8位加载的模型。 ```py # pip install accelerate bitsandbytes import torch from transformers import pipeline pipe = pipeline(model="facebook/opt-1.3b", device_map="auto", model_kwargs={"load_in_8bit": True}) output = pipe("This is a cool example!", do_sample=True, top_p=0.95) ``` 请注意,您可以将`checkpoint `替换为任何支持大模型加载的Hugging Face模型,比如BLOOM!
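作为补充(仅作演示):下面的片段把检查点换成较小的 `bigscience/bloom-560m`,并展示加载后如何查看各个模块被分配到的设备以及模型的内存占用。检查点可以替换为任何支持大模型加载的模型。

```py
# pip install accelerate
import torch
from transformers import pipeline

pipe = pipeline(model="bigscience/bloom-560m", torch_dtype=torch.bfloat16, device_map="auto")

# 查看各个模块被放置到的设备,以及模型占用的内存(单位:字节)
print(pipe.model.hf_device_map)
print(pipe.model.get_memory_footprint())

output = pipe("This is a cool example!", do_sample=True, top_p=0.95)
```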
transformers/docs/source/zh/pipeline_tutorial.md/0
{ "file_path": "transformers/docs/source/zh/pipeline_tutorial.md", "repo_id": "transformers", "token_count": 7471 }
<!--- Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Automatic Speech Recognition - Flax Examples ## Sequence to Sequence The script [`run_flax_speech_recognition_seq2seq.py`](https://github.com/huggingface/transformers/blob/main/examples/flax/speech-recognition/run_flax_speech_recognition_seq2seq.py) can be used to fine-tune any [Flax Speech Sequence-to-Sequence Model](https://huggingface.co/docs/transformers/main/en/model_doc/auto#transformers.FlaxAutoModelForSpeechSeq2Seq) for automatic speech recognition on one of the [official speech recognition datasets](https://huggingface.co/datasets?task_ids=task_ids:automatic-speech-recognition) or a custom dataset. This includes the Whisper model from OpenAI, or a warm-started Speech-Encoder-Decoder Model, an example for which is included below. ### Whisper Model We can load all components of the Whisper model directly from the pretrained checkpoint, including the pretrained model weights, feature extractor and tokenizer. We simply have to specify the id of fine-tuning dataset and the necessary training hyperparameters. The following example shows how to fine-tune the [Whisper small](https://huggingface.co/openai/whisper-small) checkpoint on the Hindi subset of the [Common Voice 13](https://huggingface.co/datasets/mozilla-foundation/common_voice_13_0) dataset. Note that before running this script you must accept the dataset's [terms of use](https://huggingface.co/datasets/mozilla-foundation/common_voice_13_0) and register your Hugging Face Hub token on your device by running `huggingface-hub login`. ```bash python run_flax_speech_recognition_seq2seq.py \ --model_name_or_path="openai/whisper-small" \ --dataset_name="mozilla-foundation/common_voice_13_0" \ --dataset_config_name="hi" \ --language="hindi" \ --train_split_name="train+validation" \ --eval_split_name="test" \ --output_dir="./whisper-small-hi-flax" \ --per_device_train_batch_size="16" \ --per_device_eval_batch_size="16" \ --num_train_epochs="10" \ --learning_rate="1e-4" \ --warmup_steps="500" \ --logging_steps="25" \ --generation_max_length="40" \ --preprocessing_num_workers="32" \ --dataloader_num_workers="32" \ --max_duration_in_seconds="30" \ --text_column_name="sentence" \ --overwrite_output_dir \ --do_train \ --do_eval \ --predict_with_generate \ --push_to_hub \ --use_auth_token ``` On a TPU v4-8, training should take approximately 25 minutes, with a final cross-entropy loss of 0.02 and word error rate of **34%**. See the checkpoint [sanchit-gandhi/whisper-small-hi-flax](https://huggingface.co/sanchit-gandhi/whisper-small-hi-flax) for an example training run.
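### Inference

Once fine-tuning has finished, the saved checkpoint can be loaded back for transcription. The snippet below is a minimal sketch: the checkpoint id is the example training run referenced above (substitute your own `--output_dir` or Hub repository), and the dummy LibriSpeech sample is used purely to demonstrate the expected 16 kHz audio inputs.

```python
from datasets import load_dataset
from transformers import FlaxWhisperForConditionalGeneration, WhisperProcessor

# Illustrative checkpoint: replace with your own fine-tuned model repository or local output directory.
checkpoint = "sanchit-gandhi/whisper-small-hi-flax"
processor = WhisperProcessor.from_pretrained(checkpoint)
model = FlaxWhisperForConditionalGeneration.from_pretrained(checkpoint)

# Dummy 16 kHz audio sample, used here only to show the expected inputs.
sample = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")[0]["audio"]

input_features = processor(
    sample["array"], sampling_rate=sample["sampling_rate"], return_tensors="np"
).input_features

# Greedy generation; `.sequences` holds the predicted token ids.
generated_ids = model.generate(input_features, max_length=40).sequences
transcription = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
print(transcription)
```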
transformers/examples/flax/speech-recognition/README.md/0
{ "file_path": "transformers/examples/flax/speech-recognition/README.md", "repo_id": "transformers", "token_count": 1039 }
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Fine-tuning the library models for question-answering.""" import logging import os import sys from dataclasses import dataclass, field from typing import Optional import transformers from transformers import ( AutoConfig, AutoModelForQuestionAnswering, AutoTokenizer, DataCollatorWithPadding, HfArgumentParser, SquadDataset, Trainer, TrainingArguments, ) from transformers import SquadDataTrainingArguments as DataTrainingArguments from transformers.trainer_utils import is_main_process logger = logging.getLogger(__name__) @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."}) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, ) def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir) and os.listdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. Use" " --overwrite_output_dir to overcome." 
) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16, ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("Training/evaluation parameters %s", training_args) # Prepare Question-Answering task # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, ) tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=False, # SquadDataset is not compatible with Fast tokenizers which have a smarter overflow handeling ) model = AutoModelForQuestionAnswering.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, ) # Get datasets is_language_sensitive = hasattr(model.config, "lang2id") train_dataset = ( SquadDataset( data_args, tokenizer=tokenizer, is_language_sensitive=is_language_sensitive, cache_dir=model_args.cache_dir ) if training_args.do_train else None ) eval_dataset = ( SquadDataset( data_args, tokenizer=tokenizer, mode="dev", is_language_sensitive=is_language_sensitive, cache_dir=model_args.cache_dir, ) if training_args.do_eval else None ) # Data collator data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None # Initialize our Trainer trainer = Trainer( model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, data_collator=data_collator, ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir) def _mp_fn(index): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
transformers/examples/legacy/question-answering/run_squad_trainer.py/0
{ "file_path": "transformers/examples/legacy/question-answering/run_squad_trainer.py", "repo_id": "transformers", "token_count": 2569 }
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from pathlib import Path import numpy as np import pytest from pack_dataset import pack_data_dir from parameterized import parameterized from save_len_file import save_len_file from torch.utils.data import DataLoader from transformers import AutoTokenizer from transformers.models.mbart.modeling_mbart import shift_tokens_right from transformers.testing_utils import TestCasePlus, slow from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeq2SeqDataset, Seq2SeqDataset BERT_BASE_CASED = "google-bert/bert-base-cased" PEGASUS_XSUM = "google/pegasus-xsum" ARTICLES = [" Sam ate lunch today.", "Sams lunch ingredients."] SUMMARIES = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"] T5_TINY = "patrickvonplaten/t5-tiny-random" BART_TINY = "sshleifer/bart-tiny-random" MBART_TINY = "sshleifer/tiny-mbart" MARIAN_TINY = "sshleifer/tiny-marian-en-de" def _dump_articles(path: Path, articles: list): content = "\n".join(articles) Path(path).open("w").writelines(content) def make_test_data_dir(tmp_dir): for split in ["train", "val", "test"]: _dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES) _dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES) return tmp_dir class TestAll(TestCasePlus): @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ], ) @slow def test_seq2seq_dataset_truncation(self, tok_name): tokenizer = AutoTokenizer.from_pretrained(tok_name) tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES) max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES) max_src_len = 4 max_tgt_len = 8 assert max_len_target > max_src_len # Will be truncated assert max_len_source > max_src_len # Will be truncated src_lang, tgt_lang = "ro_RO", "de_DE" # ignored for all but mbart, but never causes error. train_dataset = Seq2SeqDataset( tokenizer, data_dir=tmp_dir, type_path="train", max_source_length=max_src_len, max_target_length=max_tgt_len, # ignored src_lang=src_lang, tgt_lang=tgt_lang, ) dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn) for batch in dataloader: assert isinstance(batch, dict) assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. 
assert batch["input_ids"].shape[1] == max_src_len # show that targets are the same len assert batch["labels"].shape[1] == max_tgt_len if tok_name != MBART_TINY: continue # check language codes in correct place batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], tokenizer.pad_token_id) assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang] assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang] break # No need to test every batch @parameterized.expand([BART_TINY, BERT_BASE_CASED]) def test_legacy_dataset_truncation(self, tok): tokenizer = AutoTokenizer.from_pretrained(tok) tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES) max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES) trunc_target = 4 train_dataset = LegacySeq2SeqDataset( tokenizer, data_dir=tmp_dir, type_path="train", max_source_length=20, max_target_length=trunc_target, ) dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn) for batch in dataloader: assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. assert batch["input_ids"].shape[1] == max_len_source assert 20 >= batch["input_ids"].shape[1] # trimmed significantly # show that targets were truncated assert batch["labels"].shape[1] == trunc_target # Truncated assert max_len_target > trunc_target # Truncated break # No need to test every batch def test_pack_dataset(self): tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25") tmp_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())) orig_examples = tmp_dir.joinpath("train.source").open().readlines() save_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())) pack_data_dir(tokenizer, tmp_dir, 128, save_dir) orig_paths = {x.name for x in tmp_dir.iterdir()} new_paths = {x.name for x in save_dir.iterdir()} packed_examples = save_dir.joinpath("train.source").open().readlines() # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.'] # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.'] assert len(packed_examples) < len(orig_examples) assert len(packed_examples) == 1 assert len(packed_examples[0]) == sum(len(x) for x in orig_examples) assert orig_paths == new_paths @pytest.mark.skipif(not FAIRSEQ_AVAILABLE, reason="This test requires fairseq") def test_dynamic_batch_size(self): if not FAIRSEQ_AVAILABLE: return ds, max_tokens, tokenizer = self._get_dataset(max_len=64) required_batch_size_multiple = 64 batch_sampler = ds.make_dynamic_sampler(max_tokens, required_batch_size_multiple=required_batch_size_multiple) batch_sizes = [len(x) for x in batch_sampler] assert len(set(batch_sizes)) > 1 # it's not dynamic batch size if every batch is the same length assert sum(batch_sizes) == len(ds) # no dropped or added examples data_loader = DataLoader(ds, batch_sampler=batch_sampler, collate_fn=ds.collate_fn, num_workers=2) failures = [] num_src_per_batch = [] for batch in data_loader: src_shape = batch["input_ids"].shape bs = src_shape[0] assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple num_src_tokens = np.product(batch["input_ids"].shape) num_src_per_batch.append(num_src_tokens) if num_src_tokens > (max_tokens * 1.1): failures.append(num_src_tokens) assert 
num_src_per_batch[0] == max(num_src_per_batch) if failures: raise AssertionError(f"too many tokens in {len(failures)} batches") def test_sortish_sampler_reduces_padding(self): ds, _, tokenizer = self._get_dataset(max_len=512) bs = 2 sortish_sampler = ds.make_sortish_sampler(bs, shuffle=False) naive_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2) sortish_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2, sampler=sortish_sampler) pad = tokenizer.pad_token_id def count_pad_tokens(data_loader, k="input_ids"): return [batch[k].eq(pad).sum().item() for batch in data_loader] assert sum(count_pad_tokens(sortish_dl, k="labels")) < sum(count_pad_tokens(naive_dl, k="labels")) assert sum(count_pad_tokens(sortish_dl)) < sum(count_pad_tokens(naive_dl)) assert len(sortish_dl) == len(naive_dl) def _get_dataset(self, n_obs=1000, max_len=128): if os.getenv("USE_REAL_DATA", False): data_dir = "examples/seq2seq/wmt_en_ro" max_tokens = max_len * 2 * 64 if not Path(data_dir).joinpath("train.len").exists(): save_len_file(MARIAN_TINY, data_dir) else: data_dir = "examples/seq2seq/test_data/wmt_en_ro" max_tokens = max_len * 4 save_len_file(MARIAN_TINY, data_dir) tokenizer = AutoTokenizer.from_pretrained(MARIAN_TINY) ds = Seq2SeqDataset( tokenizer, data_dir=data_dir, type_path="train", max_source_length=max_len, max_target_length=max_len, n_obs=n_obs, ) return ds, max_tokens, tokenizer def test_distributed_sortish_sampler_splits_indices_between_procs(self): ds, max_tokens, tokenizer = self._get_dataset() ids1 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=0, add_extra_examples=False)) ids2 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=1, add_extra_examples=False)) assert ids1.intersection(ids2) == set() @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ], ) def test_dataset_kwargs(self, tok_name): tokenizer = AutoTokenizer.from_pretrained(tok_name, use_fast=False) if tok_name == MBART_TINY: train_dataset = Seq2SeqDataset( tokenizer, data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()), type_path="train", max_source_length=4, max_target_length=8, src_lang="EN", tgt_lang="FR", ) kwargs = train_dataset.dataset_kwargs assert "src_lang" in kwargs and "tgt_lang" in kwargs else: train_dataset = Seq2SeqDataset( tokenizer, data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()), type_path="train", max_source_length=4, max_target_length=8, ) kwargs = train_dataset.dataset_kwargs assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs assert len(kwargs) == 1 if tok_name == BART_TINY else len(kwargs) == 0
transformers/examples/legacy/seq2seq/old_test_datasets.py/0
{ "file_path": "transformers/examples/legacy/seq2seq/old_test_datasets.py", "repo_id": "transformers", "token_count": 5152 }
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from dataclasses import dataclass, field from typing import Optional from seq2seq_trainer import arg_to_scheduler from transformers import TrainingArguments logger = logging.getLogger(__name__) @dataclass class Seq2SeqTrainingArguments(TrainingArguments): """ Parameters: label_smoothing (:obj:`float`, `optional`, defaults to 0): The label smoothing epsilon to apply (if not zero). sortish_sampler (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether to SortishSampler or not. It sorts the inputs according to lengths in-order to minimizing the padding size. predict_with_generate (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether to use generate to calculate generative metrics (ROUGE, BLEU). """ label_smoothing: Optional[float] = field( default=0.0, metadata={"help": "The label smoothing epsilon to apply (if not zero)."} ) sortish_sampler: bool = field(default=False, metadata={"help": "Whether to SortishSampler or not."}) predict_with_generate: bool = field( default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} ) adafactor: bool = field(default=False, metadata={"help": "whether to use adafactor"}) encoder_layerdrop: Optional[float] = field( default=None, metadata={"help": "Encoder layer dropout probability. Goes into model.config."} ) decoder_layerdrop: Optional[float] = field( default=None, metadata={"help": "Decoder layer dropout probability. Goes into model.config."} ) dropout: Optional[float] = field(default=None, metadata={"help": "Dropout probability. Goes into model.config."}) attention_dropout: Optional[float] = field( default=None, metadata={"help": "Attention dropout probability. Goes into model.config."} ) lr_scheduler: Optional[str] = field( default="linear", metadata={"help": f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}"}, )
transformers/examples/legacy/seq2seq/seq2seq_training_args.py/0
{ "file_path": "transformers/examples/legacy/seq2seq/seq2seq_training_args.py", "repo_id": "transformers", "token_count": 888 }
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ A simple launcher script for TPU training Inspired by https://github.com/pytorch/pytorch/blob/master/torch/distributed/launch.py :: >>> python xla_spawn.py --num_cores=NUM_CORES_YOU_HAVE YOUR_TRAINING_SCRIPT.py (--arg1 --arg2 --arg3 and all other arguments of your training script) """ import importlib import sys from argparse import REMAINDER, ArgumentParser from pathlib import Path import torch_xla.distributed.xla_multiprocessing as xmp def parse_args(): """ Helper function parsing the command line options @retval ArgumentParser """ parser = ArgumentParser( description=( "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes" ) ) # Optional arguments for the launch helper parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).") # positional parser.add_argument( "training_script", type=str, help=( "The full path to the single TPU training " "program/script to be launched in parallel, " "followed by all the arguments for the " "training script" ), ) # rest from the training program parser.add_argument("training_script_args", nargs=REMAINDER) return parser.parse_args() def main(): args = parse_args() # Import training_script as a module. script_fpath = Path(args.training_script) sys.path.append(str(script_fpath.parent.resolve())) mod_name = script_fpath.stem mod = importlib.import_module(mod_name) # Patch sys.argv sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)] xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores) if __name__ == "__main__": main()
transformers/examples/legacy/seq2seq/xla_spawn.py/0
{ "file_path": "transformers/examples/legacy/seq2seq/xla_spawn.py", "repo_id": "transformers", "token_count": 887 }
<!---
Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->

# VisionTextDualEncoder and CLIP model training examples

The following example showcases how to train a CLIP-like vision-text dual encoder model
using a pre-trained vision and text encoder.

Such a model can be used for natural language image search and potentially zero-shot image classification.
The model is inspired by [CLIP](https://openai.com/blog/clip/), introduced by Alec Radford et al.
The idea is to train a vision encoder and a text encoder jointly to project the representation of images and their
captions into the same embedding space, such that the caption embeddings are located near the embeddings
of the images they describe.

### Download COCO dataset (2017)
This example uses the COCO dataset (2017) through a custom dataset script, which requires users to manually download the
COCO dataset before training.

```bash
mkdir data
cd data
wget http://images.cocodataset.org/zips/train2017.zip
wget http://images.cocodataset.org/zips/val2017.zip
wget http://images.cocodataset.org/zips/test2017.zip
wget http://images.cocodataset.org/annotations/annotations_trainval2017.zip
wget http://images.cocodataset.org/annotations/image_info_test2017.zip
cd ..
```

Having downloaded the COCO dataset manually, you should be able to load it with the `ydshieh/coco_dataset_script` dataset loading script:

```py
import os
import datasets

COCO_DIR = os.path.join(os.getcwd(), "data")
ds = datasets.load_dataset("ydshieh/coco_dataset_script", "2017", data_dir=COCO_DIR)
```

### Create a model from a vision encoder model and a text encoder model
Next, we create a [VisionTextDualEncoderModel](https://huggingface.co/docs/transformers/model_doc/vision-text-dual-encoder#visiontextdualencoder).
The `VisionTextDualEncoderModel` class lets you load any vision and text encoder model to create a dual encoder.
Here is an example of how to load the model using pre-trained vision and text models.

```python3
from transformers import (
    VisionTextDualEncoderModel,
    VisionTextDualEncoderProcessor,
    AutoTokenizer,
    AutoImageProcessor
)

model = VisionTextDualEncoderModel.from_vision_text_pretrained(
    "openai/clip-vit-base-patch32", "FacebookAI/roberta-base"
)

tokenizer = AutoTokenizer.from_pretrained("FacebookAI/roberta-base")
image_processor = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
processor = VisionTextDualEncoderProcessor(image_processor, tokenizer)

# save the model and processor
model.save_pretrained("clip-roberta")
processor.save_pretrained("clip-roberta")
```

This loads both the text and vision encoders using pre-trained weights; the projection layers are randomly
initialized, except for CLIP's vision model. If you use CLIP to initialize the vision model, the vision projection weights are also
loaded using the pre-trained weights.
### Train the model Finally, we can run the example script to train the model: ```bash python examples/pytorch/contrastive-image-text/run_clip.py \ --output_dir ./clip-roberta-finetuned \ --model_name_or_path ./clip-roberta \ --data_dir $PWD/data \ --dataset_name ydshieh/coco_dataset_script \ --dataset_config_name=2017 \ --image_column image_path \ --caption_column caption \ --remove_unused_columns=False \ --do_train --do_eval \ --per_device_train_batch_size="64" \ --per_device_eval_batch_size="64" \ --learning_rate="5e-5" --warmup_steps="0" --weight_decay 0.1 \ --overwrite_output_dir \ --push_to_hub ```
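### Use the fine-tuned model

After training, the checkpoint can be used for image-text similarity scoring. The snippet below is a minimal sketch: `./clip-roberta-finetuned` is simply the `--output_dir` from the command above, and the image URL points to an arbitrary COCO validation image.

```python
import requests
import torch
from PIL import Image

from transformers import VisionTextDualEncoderModel, VisionTextDualEncoderProcessor

# Path is illustrative: it refers to the --output_dir used in the training command above.
model = VisionTextDualEncoderModel.from_pretrained("./clip-roberta-finetuned")
processor = VisionTextDualEncoderProcessor.from_pretrained("./clip-roberta-finetuned")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
texts = ["a photo of two cats", "a photo of a dog"]

inputs = processor(text=texts, images=image, return_tensors="pt", padding=True)
with torch.no_grad():
    outputs = model(**inputs)

# Image-text similarity logits, normalized into probabilities over the candidate captions.
probs = outputs.logits_per_image.softmax(dim=1)
print(probs)
```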
transformers/examples/pytorch/contrastive-image-text/README.md/0
{ "file_path": "transformers/examples/pytorch/contrastive-image-text/README.md", "repo_id": "transformers", "token_count": 1268 }
<!--- Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> ## Language model training Fine-tuning (or training from scratch) the library models for language modeling on a text dataset for GPT, GPT-2, ALBERT, BERT, DistilBERT, RoBERTa, XLNet... GPT and GPT-2 are trained or fine-tuned using a causal language modeling (CLM) loss while ALBERT, BERT, DistilBERT and RoBERTa are trained or fine-tuned using a masked language modeling (MLM) loss. XLNet uses permutation language modeling (PLM), you can find more information about the differences between those objectives in our [model summary](https://huggingface.co/transformers/model_summary.html). There are two sets of scripts provided. The first set leverages the Trainer API. The second set with `no_trainer` in the suffix uses a custom training loop and leverages the 🤗 Accelerate library . Both sets use the 🤗 Datasets library. You can easily customize them to your needs if you need extra processing on your datasets. **Note:** The old script `run_language_modeling.py` is still available [here](https://github.com/huggingface/transformers/blob/main/examples/legacy/run_language_modeling.py). The following examples, will run on datasets hosted on our [hub](https://huggingface.co/datasets) or with your own text files for training and validation. We give examples of both below. ### GPT-2/GPT and causal language modeling The following example fine-tunes GPT-2 on WikiText-2. We're using the raw WikiText-2 (no tokens were replaced before the tokenization). The loss here is that of causal language modeling. ```bash python run_clm.py \ --model_name_or_path openai-community/gpt2 \ --dataset_name wikitext \ --dataset_config_name wikitext-2-raw-v1 \ --per_device_train_batch_size 8 \ --per_device_eval_batch_size 8 \ --do_train \ --do_eval \ --output_dir /tmp/test-clm ``` This takes about half an hour to train on a single K80 GPU and about one minute for the evaluation to run. It reaches a score of ~20 perplexity once fine-tuned on the dataset. To run on your own training and validation files, use the following command: ```bash python run_clm.py \ --model_name_or_path openai-community/gpt2 \ --train_file path_to_train_file \ --validation_file path_to_validation_file \ --per_device_train_batch_size 8 \ --per_device_eval_batch_size 8 \ --do_train \ --do_eval \ --output_dir /tmp/test-clm ``` This uses the built in HuggingFace `Trainer` for training. If you want to use a custom training loop, you can utilize or adapt the `run_clm_no_trainer.py` script. Take a look at the script for a list of supported arguments. An example is shown below: ```bash python run_clm_no_trainer.py \ --dataset_name wikitext \ --dataset_config_name wikitext-2-raw-v1 \ --model_name_or_path openai-community/gpt2 \ --output_dir /tmp/test-clm ``` ### GPT-2/GPT and causal language modeling with fill-in-the middle objective The following example fine-tunes GPT-2 on WikiText-2 but using the Fill-in-middle training objective. 
The FIM objective was proposed in [Efficient Training of Language Models to Fill in the Middle](https://arxiv.org/abs/2207.14255). The authors showed that autoregressive language models can learn to infill text after applying a straightforward transformation to the dataset, which simply moves a span of text from the middle of a document to its end.

We're using the raw WikiText-2 (no tokens were replaced before the tokenization). The loss here is that of causal language modeling.

```bash
python run_fim.py \
    --model_name_or_path gpt2 \
    --dataset_name wikitext \
    --dataset_config_name wikitext-2-raw-v1 \
    --per_device_train_batch_size 8 \
    --per_device_eval_batch_size 8 \
    --fim_rate 0.5 \
    --fim_spm_rate 0.2 \
    --do_train \
    --do_eval \
    --output_dir /tmp/test-clm
```

To run on your own training and validation files, use the following command:

```bash
python run_fim.py \
    --model_name_or_path gpt2 \
    --train_file path_to_train_file \
    --validation_file path_to_validation_file \
    --per_device_train_batch_size 8 \
    --per_device_eval_batch_size 8 \
    --fim_rate 0.5 \
    --fim_spm_rate 0.2 \
    --do_train \
    --do_eval \
    --output_dir /tmp/test-clm
```

This uses the built-in HuggingFace `Trainer` for training. If you want to use a custom training loop, you can utilize or adapt the `run_fim_no_trainer.py` script. Take a look at the script for a list of supported arguments. An example is shown below:

```bash
python run_fim_no_trainer.py \
    --model_name_or_path gpt2 \
    --dataset_name wikitext \
    --dataset_config_name wikitext-2-raw-v1 \
    --fim_rate 0.5 \
    --fim_spm_rate 0.2 \
    --output_dir /tmp/test-clm
```

**Note**: Passing a FIM rate of `0.5` means that the FIM transformation will be applied to an example with a probability of 50%, while a FIM SPM rate of `0.2` means that 20% of the FIM transformations will use SPM (Suffix-Prefix-Middle) and the remaining 80% will use PSM (Prefix-Suffix-Middle) mode of transformation.

### RoBERTa/BERT/DistilBERT and masked language modeling

The following example fine-tunes RoBERTa on WikiText-2. Here too, we're using the raw WikiText-2. The loss is different as BERT/RoBERTa have a bidirectional mechanism; we're therefore using the same loss that was used during their pre-training: masked language modeling.

In accordance with the RoBERTa paper, we use dynamic masking rather than static masking. The model may, therefore, converge slightly slower (over-fitting takes more epochs).

```bash
python run_mlm.py \
    --model_name_or_path FacebookAI/roberta-base \
    --dataset_name wikitext \
    --dataset_config_name wikitext-2-raw-v1 \
    --per_device_train_batch_size 8 \
    --per_device_eval_batch_size 8 \
    --do_train \
    --do_eval \
    --output_dir /tmp/test-mlm
```

To run on your own training and validation files, use the following command:

```bash
python run_mlm.py \
    --model_name_or_path FacebookAI/roberta-base \
    --train_file path_to_train_file \
    --validation_file path_to_validation_file \
    --per_device_train_batch_size 8 \
    --per_device_eval_batch_size 8 \
    --do_train \
    --do_eval \
    --output_dir /tmp/test-mlm
```

If your dataset is organized with one sample per line, you can use the `--line_by_line` flag (otherwise the script concatenates all texts and then splits them into blocks of the same length).

This uses the built-in HuggingFace `Trainer` for training. If you want to use a custom training loop, you can utilize or adapt the `run_mlm_no_trainer.py` script. Take a look at the script for a list of supported arguments.
An example is shown below:

```bash
python run_mlm_no_trainer.py \
    --dataset_name wikitext \
    --dataset_config_name wikitext-2-raw-v1 \
    --model_name_or_path FacebookAI/roberta-base \
    --output_dir /tmp/test-mlm
```

**Note:** On TPU, you should use the flag `--pad_to_max_length` in conjunction with the `--line_by_line` flag to make sure all your batches have the same length.

### Whole word masking

This part was moved to `examples/research_projects/mlm_wwm`.

### XLNet and permutation language modeling

XLNet uses a different training objective, which is permutation language modeling. It is an autoregressive method to learn bidirectional contexts by maximizing the expected likelihood over all permutations of the input sequence factorization order.

We use the `--plm_probability` flag to define the ratio of the length of a span of masked tokens to the surrounding context length for permutation language modeling.

The `--max_span_length` flag may also be used to limit the length of a span of masked tokens used for permutation language modeling.

Here is how to fine-tune XLNet on wikitext-2:

```bash
python run_plm.py \
    --model_name_or_path=xlnet/xlnet-base-cased \
    --dataset_name wikitext \
    --dataset_config_name wikitext-2-raw-v1 \
    --per_device_train_batch_size 8 \
    --per_device_eval_batch_size 8 \
    --do_train \
    --do_eval \
    --output_dir /tmp/test-plm
```

To fine-tune it on your own training and validation files, run:

```bash
python run_plm.py \
    --model_name_or_path=xlnet/xlnet-base-cased \
    --train_file path_to_train_file \
    --validation_file path_to_validation_file \
    --per_device_train_batch_size 8 \
    --per_device_eval_batch_size 8 \
    --do_train \
    --do_eval \
    --output_dir /tmp/test-plm
```

If your dataset is organized with one sample per line, you can use the `--line_by_line` flag (otherwise the script concatenates all texts and then splits them into blocks of the same length).

**Note:** On TPU, you should use the flag `--pad_to_max_length` in conjunction with the `--line_by_line` flag to make sure all your batches have the same length.

## Streaming

To use the streaming dataset mode, which can be very useful for large datasets, add `--streaming` to the command line. This is supported by `run_mlm.py`, `run_clm.py` and `run_fim.py`. Make sure to adapt the other scripts to your use case by taking inspiration from them.

## Low CPU Memory Usage

To use the low CPU memory mode, which can be very useful for LLMs, add `--low_cpu_mem_usage` to the command line. This is currently supported by `run_clm.py`, `run_mlm.py`, `run_plm.py`, `run_fim.py`, `run_mlm_no_trainer.py`, `run_clm_no_trainer.py` and `run_fim_no_trainer.py`.

## Creating a model on the fly

When training a model from scratch, configuration values may be overridden with the help of `--config_overrides`:

```bash
python run_clm.py --model_type gpt2 --tokenizer_name openai-community/gpt2 \
    --config_overrides="n_embd=1024,n_head=16,n_layer=48,n_positions=1024" \
    [...]
```

This feature is only available in `run_clm.py`, `run_plm.py`, `run_mlm.py` and `run_fim.py`.
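For intuition, the following is a minimal sketch (not part of the example scripts, whose exact override parsing may differ) of what this option amounts to: build a fresh configuration for the chosen model type, apply the overrides, and instantiate a randomly initialized model from it.

```python
from transformers import CONFIG_MAPPING, AutoModelForCausalLM

# Build a GPT-2-style config from scratch and override selected values
config = CONFIG_MAPPING["gpt2"]()
config.update({"n_embd": 1024, "n_head": 16, "n_layer": 48, "n_positions": 1024})

# Randomly initialized weights, ready to be trained from scratch
model = AutoModelForCausalLM.from_config(config)
print(f"New model has {model.num_parameters():,} parameters")
```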
transformers/examples/pytorch/language-modeling/README.md/0
{ "file_path": "transformers/examples/pytorch/language-modeling/README.md", "repo_id": "transformers", "token_count": 3477 }
#!/usr/bin/env python # coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and """Finetuning any 🤗 Transformers model supported by AutoModelForObjectDetection for object detection leveraging the Trainer API.""" import logging import os import sys from dataclasses import dataclass, field from functools import partial from typing import Any, List, Mapping, Optional, Tuple, Union import albumentations as A import numpy as np import torch from datasets import load_dataset from torchmetrics.detection.mean_ap import MeanAveragePrecision import transformers from transformers import ( AutoConfig, AutoImageProcessor, AutoModelForObjectDetection, HfArgumentParser, Trainer, TrainingArguments, ) from transformers.image_processing_utils import BatchFeature from transformers.image_transforms import center_to_corners_format from transformers.trainer import EvalPrediction from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version logger = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.49.0.dev0") require_version("datasets>=2.0.0", "To fix: pip install -r examples/pytorch/object-detection/requirements.txt") @dataclass class ModelOutput: logits: torch.Tensor pred_boxes: torch.Tensor def format_image_annotations_as_coco( image_id: str, categories: List[int], areas: List[float], bboxes: List[Tuple[float]] ) -> dict: """Format one set of image annotations to the COCO format Args: image_id (str): image id. e.g. "0001" categories (List[int]): list of categories/class labels corresponding to provided bounding boxes areas (List[float]): list of corresponding areas to provided bounding boxes bboxes (List[Tuple[float]]): list of bounding boxes provided in COCO format ([center_x, center_y, width, height] in absolute coordinates) Returns: dict: { "image_id": image id, "annotations": list of formatted annotations } """ annotations = [] for category, area, bbox in zip(categories, areas, bboxes): formatted_annotation = { "image_id": image_id, "category_id": category, "iscrowd": 0, "area": area, "bbox": list(bbox), } annotations.append(formatted_annotation) return { "image_id": image_id, "annotations": annotations, } def convert_bbox_yolo_to_pascal(boxes: torch.Tensor, image_size: Tuple[int, int]) -> torch.Tensor: """ Convert bounding boxes from YOLO format (x_center, y_center, width, height) in range [0, 1] to Pascal VOC format (x_min, y_min, x_max, y_max) in absolute coordinates. 
Args: boxes (torch.Tensor): Bounding boxes in YOLO format image_size (Tuple[int, int]): Image size in format (height, width) Returns: torch.Tensor: Bounding boxes in Pascal VOC format (x_min, y_min, x_max, y_max) """ # convert center to corners format boxes = center_to_corners_format(boxes) # convert to absolute coordinates height, width = image_size boxes = boxes * torch.tensor([[width, height, width, height]]) return boxes def augment_and_transform_batch( examples: Mapping[str, Any], transform: A.Compose, image_processor: AutoImageProcessor, return_pixel_mask: bool = False, ) -> BatchFeature: """Apply augmentations and format annotations in COCO format for object detection task""" images = [] annotations = [] for image_id, image, objects in zip(examples["image_id"], examples["image"], examples["objects"]): image = np.array(image.convert("RGB")) # apply augmentations output = transform(image=image, bboxes=objects["bbox"], category=objects["category"]) images.append(output["image"]) # format annotations in COCO format formatted_annotations = format_image_annotations_as_coco( image_id, output["category"], objects["area"], output["bboxes"] ) annotations.append(formatted_annotations) # Apply the image processor transformations: resizing, rescaling, normalization result = image_processor(images=images, annotations=annotations, return_tensors="pt") if not return_pixel_mask: result.pop("pixel_mask", None) return result def collate_fn(batch: List[BatchFeature]) -> Mapping[str, Union[torch.Tensor, List[Any]]]: data = {} data["pixel_values"] = torch.stack([x["pixel_values"] for x in batch]) data["labels"] = [x["labels"] for x in batch] if "pixel_mask" in batch[0]: data["pixel_mask"] = torch.stack([x["pixel_mask"] for x in batch]) return data @torch.no_grad() def compute_metrics( evaluation_results: EvalPrediction, image_processor: AutoImageProcessor, threshold: float = 0.0, id2label: Optional[Mapping[int, str]] = None, ) -> Mapping[str, float]: """ Compute mean average mAP, mAR and their variants for the object detection task. Args: evaluation_results (EvalPrediction): Predictions and targets from evaluation. threshold (float, optional): Threshold to filter predicted boxes by confidence. Defaults to 0.0. id2label (Optional[dict], optional): Mapping from class id to class name. Defaults to None. 
Returns: Mapping[str, float]: Metrics in a form of dictionary {<metric_name>: <metric_value>} """ predictions, targets = evaluation_results.predictions, evaluation_results.label_ids # For metric computation we need to provide: # - targets in a form of list of dictionaries with keys "boxes", "labels" # - predictions in a form of list of dictionaries with keys "boxes", "scores", "labels" image_sizes = [] post_processed_targets = [] post_processed_predictions = [] # Collect targets in the required format for metric computation for batch in targets: # collect image sizes, we will need them for predictions post processing batch_image_sizes = torch.tensor([x["orig_size"] for x in batch]) image_sizes.append(batch_image_sizes) # collect targets in the required format for metric computation # boxes were converted to YOLO format needed for model training # here we will convert them to Pascal VOC format (x_min, y_min, x_max, y_max) for image_target in batch: boxes = torch.tensor(image_target["boxes"]) boxes = convert_bbox_yolo_to_pascal(boxes, image_target["orig_size"]) labels = torch.tensor(image_target["class_labels"]) post_processed_targets.append({"boxes": boxes, "labels": labels}) # Collect predictions in the required format for metric computation, # model produce boxes in YOLO format, then image_processor convert them to Pascal VOC format for batch, target_sizes in zip(predictions, image_sizes): batch_logits, batch_boxes = batch[1], batch[2] output = ModelOutput(logits=torch.tensor(batch_logits), pred_boxes=torch.tensor(batch_boxes)) post_processed_output = image_processor.post_process_object_detection( output, threshold=threshold, target_sizes=target_sizes ) post_processed_predictions.extend(post_processed_output) # Compute metrics metric = MeanAveragePrecision(box_format="xyxy", class_metrics=True) metric.update(post_processed_predictions, post_processed_targets) metrics = metric.compute() # Replace list of per class metrics with separate metric for each class classes = metrics.pop("classes") map_per_class = metrics.pop("map_per_class") mar_100_per_class = metrics.pop("mar_100_per_class") for class_id, class_map, class_mar in zip(classes, map_per_class, mar_100_per_class): class_name = id2label[class_id.item()] if id2label is not None else class_id.item() metrics[f"map_{class_name}"] = class_map metrics[f"mar_100_{class_name}"] = class_mar metrics = {k: round(v.item(), 4) for k, v in metrics.items()} return metrics @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify them on the command line. """ dataset_name: str = field( default="cppe-5", metadata={ "help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)." }, ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) train_val_split: Optional[float] = field( default=0.15, metadata={"help": "Percent to split off of train for validation."} ) image_square_size: Optional[int] = field( default=600, metadata={"help": "Image longest size will be resized to this value, then image will be padded to square."}, ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." 
) }, ) max_eval_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) }, ) use_fast: Optional[bool] = field( default=True, metadata={"help": "Use a fast torchvision-base image processor if it is supported for a given model."}, ) @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( default="facebook/detr-resnet-50", metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}, ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."}) ignore_mismatched_sizes: bool = field( default=False, metadata={ "help": "Whether or not to raise an error if some of the weights from the checkpoint do not have the same size as the weights of the model (if for instance, you are instantiating a model with 10 labels from a checkpoint with 3 labels)." }, ) token: str = field( default=None, metadata={ "help": ( "The token to use as HTTP bearer authorization for remote files. If not specified, will use the token " "generated when running `huggingface-cli login` (stored in `~/.huggingface`)." ) }, ) trust_remote_code: bool = field( default=False, metadata={ "help": ( "Whether to trust the execution of code from datasets/models defined on the Hub." " This option should only be set to `True` for repositories you trust and in which you have read the" " code, as it will execute code present on the Hub on your local machine." ) }, ) def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_object_detection", model_args, data_args) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. 
transformers.utils.logging.set_verbosity_info() log_level = training_args.get_process_log_level() logger.setLevel(log_level) transformers.utils.logging.set_verbosity(log_level) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, " + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}" ) logger.info(f"Training/evaluation parameters {training_args}") # Detecting last checkpoint. checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint elif os.path.isdir(training_args.output_dir) and not training_args.overwrite_output_dir: checkpoint = get_last_checkpoint(training_args.output_dir) if checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # ------------------------------------------------------------------------------------------------ # Load dataset, prepare splits # ------------------------------------------------------------------------------------------------ dataset = load_dataset( data_args.dataset_name, cache_dir=model_args.cache_dir, trust_remote_code=model_args.trust_remote_code ) # If we don't have a validation split, split off a percentage of train as validation data_args.train_val_split = None if "validation" in dataset.keys() else data_args.train_val_split if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0: split = dataset["train"].train_test_split(data_args.train_val_split, seed=training_args.seed) dataset["train"] = split["train"] dataset["validation"] = split["test"] # Get dataset categories and prepare mappings for label_name <-> label_id categories = dataset["train"].features["objects"].feature["category"].names id2label = dict(enumerate(categories)) label2id = {v: k for k, v in id2label.items()} # ------------------------------------------------------------------------------------------------ # Load pretrained config, model and image processor # ------------------------------------------------------------------------------------------------ common_pretrained_args = { "cache_dir": model_args.cache_dir, "revision": model_args.model_revision, "token": model_args.token, "trust_remote_code": model_args.trust_remote_code, } config = AutoConfig.from_pretrained( model_args.config_name or model_args.model_name_or_path, label2id=label2id, id2label=id2label, **common_pretrained_args, ) model = AutoModelForObjectDetection.from_pretrained( model_args.model_name_or_path, config=config, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, **common_pretrained_args, ) image_processor = AutoImageProcessor.from_pretrained( model_args.image_processor_name or model_args.model_name_or_path, do_resize=True, size={"max_height": data_args.image_square_size, "max_width": data_args.image_square_size}, do_pad=True, pad_size={"height": data_args.image_square_size, "width": data_args.image_square_size}, 
use_fast=data_args.use_fast, **common_pretrained_args, ) # ------------------------------------------------------------------------------------------------ # Define image augmentations and dataset transforms # ------------------------------------------------------------------------------------------------ max_size = data_args.image_square_size train_augment_and_transform = A.Compose( [ A.Compose( [ A.SmallestMaxSize(max_size=max_size, p=1.0), A.RandomSizedBBoxSafeCrop(height=max_size, width=max_size, p=1.0), ], p=0.2, ), A.OneOf( [ A.Blur(blur_limit=7, p=0.5), A.MotionBlur(blur_limit=7, p=0.5), A.Defocus(radius=(1, 5), alias_blur=(0.1, 0.25), p=0.1), ], p=0.1, ), A.Perspective(p=0.1), A.HorizontalFlip(p=0.5), A.RandomBrightnessContrast(p=0.5), A.HueSaturationValue(p=0.1), ], bbox_params=A.BboxParams(format="coco", label_fields=["category"], clip=True, min_area=25), ) validation_transform = A.Compose( [A.NoOp()], bbox_params=A.BboxParams(format="coco", label_fields=["category"], clip=True), ) # Make transform functions for batch and apply for dataset splits train_transform_batch = partial( augment_and_transform_batch, transform=train_augment_and_transform, image_processor=image_processor ) validation_transform_batch = partial( augment_and_transform_batch, transform=validation_transform, image_processor=image_processor ) dataset["train"] = dataset["train"].with_transform(train_transform_batch) dataset["validation"] = dataset["validation"].with_transform(validation_transform_batch) dataset["test"] = dataset["test"].with_transform(validation_transform_batch) # ------------------------------------------------------------------------------------------------ # Model training and evaluation with Trainer API # ------------------------------------------------------------------------------------------------ eval_compute_metrics_fn = partial( compute_metrics, image_processor=image_processor, id2label=id2label, threshold=0.0 ) trainer = Trainer( model=model, args=training_args, train_dataset=dataset["train"] if training_args.do_train else None, eval_dataset=dataset["validation"] if training_args.do_eval else None, processing_class=image_processor, data_collator=collate_fn, compute_metrics=eval_compute_metrics_fn, ) # Training if training_args.do_train: train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() trainer.log_metrics("train", train_result.metrics) trainer.save_metrics("train", train_result.metrics) trainer.save_state() # Final evaluation if training_args.do_eval: metrics = trainer.evaluate(eval_dataset=dataset["test"], metric_key_prefix="test") trainer.log_metrics("test", metrics) trainer.save_metrics("test", metrics) # Write model card and (optionally) push to hub kwargs = { "finetuned_from": model_args.model_name_or_path, "dataset": data_args.dataset_name, "tags": ["object-detection", "vision"], } if training_args.push_to_hub: trainer.push_to_hub(**kwargs) else: trainer.create_model_card(**kwargs) if __name__ == "__main__": main()
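# ---------------------------------------------------------------------------------------------
# Usage note (illustrative only): the dataclass arguments above are combined with the standard
# `TrainingArguments`, so a typical invocation could look like the following. The values are
# examples, not recommendations; run the script with `--help` for the full list of options.
#
#   python run_object_detection.py \
#       --model_name_or_path facebook/detr-resnet-50 \
#       --dataset_name cppe-5 \
#       --do_train --do_eval \
#       --image_square_size 600 \
#       --per_device_train_batch_size 8 \
#       --num_train_epochs 10 \
#       --output_dir ./detr-finetuned-cppe5
# ---------------------------------------------------------------------------------------------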
transformers/examples/pytorch/object-detection/run_object_detection.py/0
{ "file_path": "transformers/examples/pytorch/object-detection/run_object_detection.py", "repo_id": "transformers", "token_count": 8023 }
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Finetuning any 🤗 Transformers model supported by AutoModelForSemanticSegmentation for semantic segmentation.""" import argparse import json import math import os import warnings from functools import partial from pathlib import Path import albumentations as A import datasets import evaluate import numpy as np import torch from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from albumentations.pytorch import ToTensorV2 from datasets import load_dataset from huggingface_hub import HfApi, hf_hub_download from torch.utils.data import DataLoader from tqdm.auto import tqdm import transformers from transformers import ( AutoConfig, AutoImageProcessor, AutoModelForSemanticSegmentation, SchedulerType, default_data_collator, get_scheduler, ) from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.49.0.dev0") logger = get_logger(__name__) require_version("datasets>=2.0.0", "To fix: pip install -r examples/pytorch/semantic-segmentation/requirements.txt") def reduce_labels_transform(labels: np.ndarray, **kwargs) -> np.ndarray: """Set `0` label as with value 255 and then reduce all other labels by 1. Example: Initial class labels: 0 - background; 1 - road; 2 - car; Transformed class labels: 255 - background; 0 - road; 1 - car; **kwargs are required to use this function with albumentations. 
""" labels[labels == 0] = 255 labels = labels - 1 labels[labels == 254] = 255 return labels def parse_args(): parser = argparse.ArgumentParser(description="Finetune a transformers model on a image semantic segmentation task") parser.add_argument( "--model_name_or_path", type=str, help="Path to a pretrained model or model identifier from huggingface.co/models.", default="nvidia/mit-b0", ) parser.add_argument( "--dataset_name", type=str, help="Name of the dataset on the hub.", default="segments/sidewalk-semantic", ) parser.add_argument( "--do_reduce_labels", action="store_true", help="Whether or not to reduce all labels by 1 and replace background by 255.", ) parser.add_argument( "--reduce_labels", action="store_true", help="Whether or not to reduce all labels by 1 and replace background by 255.", ) parser.add_argument( "--train_val_split", type=float, default=0.15, help="Fraction of the dataset to be used for validation.", ) parser.add_argument( "--cache_dir", type=str, help="Path to a folder in which the model and dataset will be cached.", ) parser.add_argument( "--use_auth_token", action="store_true", help="Whether to use an authentication token to access the model repository.", ) parser.add_argument( "--per_device_train_batch_size", type=int, default=8, help="Batch size (per device) for the training dataloader.", ) parser.add_argument( "--per_device_eval_batch_size", type=int, default=8, help="Batch size (per device) for the evaluation dataloader.", ) parser.add_argument( "--learning_rate", type=float, default=5e-5, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument( "--adam_beta1", type=float, default=0.9, help="Beta1 for AdamW optimizer", ) parser.add_argument( "--adam_beta2", type=float, default=0.999, help="Beta2 for AdamW optimizer", ) parser.add_argument( "--adam_epsilon", type=float, default=1e-8, help="Epsilon for AdamW optimizer", ) parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.") parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--lr_scheduler_type", type=SchedulerType, default="polynomial", help="The scheduler type to use.", choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"], ) parser.add_argument( "--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.") parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument( "--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`." ) parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.") parser.add_argument( "--trust_remote_code", action="store_true", help=( "Whether to trust the execution of code from datasets/models defined on the Hub." 
" This option should only be set to `True` for repositories you trust and in which you have read the" " code, as it will execute code present on the Hub on your local machine." ), ) parser.add_argument( "--checkpointing_steps", type=str, default=None, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help="If the training should continue from a checkpoint folder.", ) parser.add_argument( "--with_tracking", required=False, action="store_true", help="Whether to enable experiment trackers for logging.", ) parser.add_argument( "--report_to", type=str, default="all", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,' ' `"wandb"`, `"comet_ml"` and `"clearml"`. Use `"all"` (default) to report to all integrations. ' "Only applicable when `--with_tracking` is passed." ), ) args = parser.parse_args() # Sanity checks if args.push_to_hub or args.with_tracking: if args.output_dir is None: raise ValueError( "Need an `output_dir` to create a repo when `--push_to_hub` or `with_tracking` is specified." ) # Deprecation if args.reduce_labels: args.do_reduce_labels = args.reduce_labels warnings.warn( "The `reduce_labels` argument is deprecated and will be removed in v4.45. Please use `do_reduce_labels` instead.", FutureWarning, ) if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) return args def main(): args = parse_args() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_semantic_segmentation_no_trainer", args) # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. # If we're using tracking, we also need to initialize it here and it will by default pick up all supported trackers # in the environment accelerator_log_kwargs = {} if args.with_tracking: accelerator_log_kwargs["log_with"] = args.report_to accelerator_log_kwargs["project_dir"] = args.output_dir accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps, **accelerator_log_kwargs) logger.info(accelerator.state, main_process_only=False) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. # We set device_specific to True as we want different data augmentation per device. 
if args.seed is not None: set_seed(args.seed, device_specific=True) # Handle the repository creation if accelerator.is_main_process: if args.push_to_hub: # Retrieve of infer repo_name repo_name = args.hub_model_id if repo_name is None: repo_name = Path(args.output_dir).absolute().name # Create repo and retrieve repo_id api = HfApi() repo_id = api.create_repo(repo_name, exist_ok=True, token=args.hub_token).repo_id with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore: if "step_*" not in gitignore: gitignore.write("step_*\n") if "epoch_*" not in gitignore: gitignore.write("epoch_*\n") elif args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) accelerator.wait_for_everyone() # Load dataset # In distributed training, the load_dataset function guarantees that only one local process can concurrently # download the dataset. # TODO support datasets from local folders dataset = load_dataset(args.dataset_name, cache_dir=args.cache_dir, trust_remote_code=args.trust_remote_code) # Rename column names to standardized names (only "image" and "label" need to be present) if "pixel_values" in dataset["train"].column_names: dataset = dataset.rename_columns({"pixel_values": "image"}) if "annotation" in dataset["train"].column_names: dataset = dataset.rename_columns({"annotation": "label"}) # If we don't have a validation split, split off a percentage of train as validation. args.train_val_split = None if "validation" in dataset.keys() else args.train_val_split if isinstance(args.train_val_split, float) and args.train_val_split > 0.0: split = dataset["train"].train_test_split(args.train_val_split) dataset["train"] = split["train"] dataset["validation"] = split["test"] # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. if args.dataset_name == "scene_parse_150": repo_id = "huggingface/label-files" filename = "ade20k-id2label.json" else: repo_id = args.dataset_name filename = "id2label.json" id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} label2id = {v: k for k, v in id2label.items()} # Load pretrained model and image processor config = AutoConfig.from_pretrained( args.model_name_or_path, id2label=id2label, label2id=label2id, trust_remote_code=args.trust_remote_code ) image_processor = AutoImageProcessor.from_pretrained( args.model_name_or_path, trust_remote_code=args.trust_remote_code ) model = AutoModelForSemanticSegmentation.from_pretrained( args.model_name_or_path, config=config, trust_remote_code=args.trust_remote_code, do_reduce_labels=args.do_reduce_labels, ) # Define transforms to be applied to each image and target. if "shortest_edge" in image_processor.size: # We instead set the target size as (shortest_edge, shortest_edge) to here to ensure all images are batchable. 
height, width = image_processor.size["shortest_edge"], image_processor.size["shortest_edge"] else: height, width = image_processor.size["height"], image_processor.size["width"] train_transforms = A.Compose( [ A.Lambda(name="reduce_labels", mask=reduce_labels_transform if args.do_reduce_labels else None, p=1.0), # pad image with 255, because it is ignored by loss A.PadIfNeeded(min_height=height, min_width=width, border_mode=0, value=255, p=1.0), A.RandomCrop(height=height, width=width, p=1.0), A.HorizontalFlip(p=0.5), A.Normalize(mean=image_processor.image_mean, std=image_processor.image_std, max_pixel_value=255.0, p=1.0), ToTensorV2(), ] ) val_transforms = A.Compose( [ A.Lambda(name="reduce_labels", mask=reduce_labels_transform if args.do_reduce_labels else None, p=1.0), A.Resize(height=height, width=width, p=1.0), A.Normalize(mean=image_processor.image_mean, std=image_processor.image_std, max_pixel_value=255.0, p=1.0), ToTensorV2(), ] ) def preprocess_batch(example_batch, transforms: A.Compose): pixel_values = [] labels = [] for image, target in zip(example_batch["image"], example_batch["label"]): transformed = transforms(image=np.array(image.convert("RGB")), mask=np.array(target)) pixel_values.append(transformed["image"]) labels.append(transformed["mask"]) encoding = {} encoding["pixel_values"] = torch.stack(pixel_values).to(torch.float) encoding["labels"] = torch.stack(labels).to(torch.long) return encoding # Preprocess function for dataset should have only one input argument, # so we use partial to pass transforms preprocess_train_batch_fn = partial(preprocess_batch, transforms=train_transforms) preprocess_val_batch_fn = partial(preprocess_batch, transforms=val_transforms) with accelerator.main_process_first(): train_dataset = dataset["train"].with_transform(preprocess_train_batch_fn) eval_dataset = dataset["validation"].with_transform(preprocess_val_batch_fn) train_dataloader = DataLoader( train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=args.per_device_train_batch_size ) eval_dataloader = DataLoader( eval_dataset, collate_fn=default_data_collator, batch_size=args.per_device_eval_batch_size ) # Optimizer optimizer = torch.optim.AdamW( list(model.parameters()), lr=args.learning_rate, betas=[args.adam_beta1, args.adam_beta2], eps=args.adam_epsilon, ) # Figure out how many steps we should save the Accelerator states checkpointing_steps = args.checkpointing_steps if checkpointing_steps is not None and checkpointing_steps.isdigit(): checkpointing_steps = int(checkpointing_steps) # Scheduler and math around the number of training steps. overrode_max_train_steps = False num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch overrode_max_train_steps = True lr_scheduler = get_scheduler( name=args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=args.num_warmup_steps * accelerator.num_processes, num_training_steps=args.max_train_steps if overrode_max_train_steps else args.max_train_steps * accelerator.num_processes, ) # Prepare everything with our `accelerator`. model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader, lr_scheduler ) # We need to recalculate our total training steps as the size of the training dataloader may have changed. 
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if overrode_max_train_steps: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # Instantiate metric metric = evaluate.load("mean_iou", cache_dir=args.cache_dir) # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. if args.with_tracking: experiment_config = vars(args) # TensorBoard cannot log Enums, need the raw value experiment_config["lr_scheduler_type"] = experiment_config["lr_scheduler_type"].value accelerator.init_trackers("semantic_segmentation_no_trainer", experiment_config) # Train! total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") # Only show the progress bar once on each machine. progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process) completed_steps = 0 starting_epoch = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "": checkpoint_path = args.resume_from_checkpoint path = os.path.basename(args.resume_from_checkpoint) else: # Get the most recent checkpoint dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()] dirs.sort(key=os.path.getctime) path = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last checkpoint_path = path path = os.path.basename(checkpoint_path) accelerator.print(f"Resumed from checkpoint: {checkpoint_path}") accelerator.load_state(checkpoint_path) # Extract `epoch_{i}` or `step_{i}` training_difference = os.path.splitext(path)[0] if "epoch" in training_difference: starting_epoch = int(training_difference.replace("epoch_", "")) + 1 resume_step = None completed_steps = starting_epoch * num_update_steps_per_epoch else: # need to multiply `gradient_accumulation_steps` to reflect real steps resume_step = int(training_difference.replace("step_", "")) * args.gradient_accumulation_steps starting_epoch = resume_step // len(train_dataloader) completed_steps = resume_step // args.gradient_accumulation_steps resume_step -= starting_epoch * len(train_dataloader) # update the progress_bar if load from checkpoint progress_bar.update(completed_steps) for epoch in range(starting_epoch, args.num_train_epochs): model.train() if args.with_tracking: total_loss = 0 if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None: # We skip the first `n` batches in the dataloader when resuming from a checkpoint active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step) else: active_dataloader = train_dataloader for step, batch in enumerate(active_dataloader): with accelerator.accumulate(model): outputs = model(**batch) loss = outputs.loss # We keep 
track of the loss at each epoch if args.with_tracking: total_loss += loss.detach().float() accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: progress_bar.update(1) completed_steps += 1 if isinstance(checkpointing_steps, int): if completed_steps % checkpointing_steps == 0 and accelerator.sync_gradients: output_dir = f"step_{completed_steps}" if args.output_dir is not None: output_dir = os.path.join(args.output_dir, output_dir) accelerator.save_state(output_dir) if args.push_to_hub and epoch < args.num_train_epochs - 1: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained( args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save, ) if accelerator.is_main_process: image_processor.save_pretrained(args.output_dir) api.upload_folder( commit_message=f"Training in progress epoch {epoch}", folder_path=args.output_dir, repo_id=repo_id, repo_type="model", token=args.hub_token, ) if completed_steps >= args.max_train_steps: break logger.info("***** Running evaluation *****") model.eval() for step, batch in enumerate(tqdm(eval_dataloader, disable=not accelerator.is_local_main_process)): with torch.no_grad(): outputs = model(**batch) upsampled_logits = torch.nn.functional.interpolate( outputs.logits, size=batch["labels"].shape[-2:], mode="bilinear", align_corners=False ) predictions = upsampled_logits.argmax(dim=1) predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"])) metric.add_batch( predictions=predictions, references=references, ) eval_metrics = metric.compute( num_labels=len(id2label), ignore_index=255, reduce_labels=False, # we've already reduced the labels before ) logger.info(f"epoch {epoch}: {eval_metrics}") if args.with_tracking: accelerator.log( { "mean_iou": eval_metrics["mean_iou"], "mean_accuracy": eval_metrics["mean_accuracy"], "overall_accuracy": eval_metrics["overall_accuracy"], "train_loss": total_loss.item() / len(train_dataloader), "epoch": epoch, "step": completed_steps, }, step=completed_steps, ) if args.push_to_hub and epoch < args.num_train_epochs - 1: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained( args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save ) if accelerator.is_main_process: image_processor.save_pretrained(args.output_dir) api.upload_folder( commit_message=f"Training in progress epoch {epoch}", folder_path=args.output_dir, repo_id=repo_id, repo_type="model", token=args.hub_token, ) if args.checkpointing_steps == "epoch": output_dir = f"epoch_{epoch}" if args.output_dir is not None: output_dir = os.path.join(args.output_dir, output_dir) accelerator.save_state(output_dir) if args.with_tracking: accelerator.end_training() if args.output_dir is not None: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained( args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save ) if accelerator.is_main_process: image_processor.save_pretrained(args.output_dir) if args.push_to_hub: api.upload_folder( commit_message="End of training", folder_path=args.output_dir, repo_id=repo_id, repo_type="model", token=args.hub_token, ) all_results = { f"eval_{k}": v.tolist() if isinstance(v, np.ndarray) else v for k, v in 
eval_metrics.items() } with open(os.path.join(args.output_dir, "all_results.json"), "w") as f: json.dump(all_results, f, indent=2) if __name__ == "__main__": main()
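# ---------------------------------------------------------------------------------------------
# Usage note (illustrative only): this no_trainer variant is meant to be launched with
# 🤗 Accelerate. The values below are examples, not recommendations; run the script with
# `--help` for the full list of options.
#
#   accelerate launch run_semantic_segmentation_no_trainer.py \
#       --model_name_or_path nvidia/mit-b0 \
#       --dataset_name segments/sidewalk-semantic \
#       --per_device_train_batch_size 8 \
#       --num_train_epochs 10 \
#       --checkpointing_steps epoch \
#       --output_dir ./segformer-finetuned
# ---------------------------------------------------------------------------------------------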
transformers/examples/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py/0
{ "file_path": "transformers/examples/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py", "repo_id": "transformers", "token_count": 11268 }
# Text Summarization with Pretrained Encoders

This folder contains part of the code necessary to reproduce the results on abstractive summarization from the article [Text Summarization with Pretrained Encoders](https://arxiv.org/pdf/1908.08345.pdf) by [Yang Liu](https://nlp-yang.github.io/) and [Mirella Lapata](https://homepages.inf.ed.ac.uk/mlap/). It can also be used to summarize any document.

The original code can be found in Yang Liu's [GitHub repository](https://github.com/nlpyang/PreSumm).

The model is loaded with the pre-trained weights for the abstractive summarization model trained on the CNN/Daily Mail dataset, first with an extractive and then with an abstractive objective.

## Setup

```bash
git clone https://github.com/huggingface/transformers && cd transformers
pip install .
pip install nltk py-rouge
cd examples/seq2seq/bertabs
```

## Reproduce the authors' ROUGE score

To reproduce the authors' results on the CNN/Daily Mail dataset, you first need to download both the CNN and Daily Mail datasets [from Kyunghyun Cho's website](https://cs.nyu.edu/~kcho/DMQA/) (the links next to "Stories") into the same folder. Then uncompress the archives by running:

```bash
tar -xvf cnn_stories.tgz && tar -xvf dailymail_stories.tgz
```

Then move all the stories to the same folder. We will refer to the folder where you uncompressed both archives as `$DATA_PATH`. Run the following in the same folder as `run_summarization.py` (the `--summaries_output_dir` argument is optional):

```bash
python run_summarization.py \
    --documents_dir $DATA_PATH \
    --summaries_output_dir $SUMMARIES_PATH \
    --no_cuda false \
    --batch_size 4 \
    --min_length 50 \
    --max_length 200 \
    --beam_size 5 \
    --alpha 0.95 \
    --block_trigram true \
    --compute_rouge true
```

The script executes on a GPU if one is available and if `no_cuda` is not set to `true`. Inference on multiple GPUs is not supported yet. The ROUGE scores are displayed in the console at the end of evaluation and written to a `rouge_scores.txt` file. The script takes about 30 hours to run on a single Tesla V100 GPU with a batch size of 10 (300,000 texts to summarize).

## Summarize any text

Put the documents that you would like to summarize in a folder (the path to which is referred to as `$DATA_PATH` below) and run the following in the same folder as `run_summarization.py` (again, `--summaries_output_dir` is optional):

```bash
python run_summarization.py \
    --documents_dir $DATA_PATH \
    --summaries_output_dir $SUMMARIES_PATH \
    --no_cuda false \
    --batch_size 4 \
    --min_length 50 \
    --max_length 200 \
    --beam_size 5 \
    --alpha 0.95 \
    --block_trigram true
```

You may want to play around with `min_length`, `max_length` and `alpha` to suit your use case. If you want to compute ROUGE on another dataset, you will need to tweak the stories/summaries import in `utils_summarization.py` and tell it where to fetch the reference summaries.
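For reference, scoring a set of generated summaries with the `py-rouge` package installed above might look like the following minimal sketch; the exact metric configuration used by `run_summarization.py` may differ, so treat the options and the example texts as illustrative only:

```python
import rouge  # provided by the py-rouge package

candidates = ["the cat sat on the mat"]        # generated summaries
references = ["a cat was sitting on the mat"]  # reference summaries

evaluator = rouge.Rouge(
    metrics=["rouge-n", "rouge-l"],
    max_n=2,
    limit_length=False,
    apply_avg=True,
    stemming=True,
)
scores = evaluator.get_scores(candidates, references)
print(scores["rouge-1"], scores["rouge-2"], scores["rouge-l"])
```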
transformers/examples/research_projects/bertabs/README.md/0
{ "file_path": "transformers/examples/research_projects/bertabs/README.md", "repo_id": "transformers", "token_count": 909 }
# coding=utf-8 # Copyright 2019-present, the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Preprocessing script before training DistilBERT. Specific to BERT -> DistilBERT. """ import argparse import torch from transformers import BertForMaskedLM if __name__ == "__main__": parser = argparse.ArgumentParser( description=( "Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned" " Distillation" ) ) parser.add_argument("--model_type", default="bert", choices=["bert"]) parser.add_argument("--model_name", default="bert-base-uncased", type=str) parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_bert-base-uncased_0247911.pth", type=str) parser.add_argument("--vocab_transform", action="store_true") args = parser.parse_args() if args.model_type == "bert": model = BertForMaskedLM.from_pretrained(args.model_name) prefix = "bert" else: raise ValueError('args.model_type should be "bert".') state_dict = model.state_dict() compressed_sd = {} for w in ["word_embeddings", "position_embeddings"]: compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"] for w in ["weight", "bias"]: compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"] std_idx = 0 for teacher_idx in [0, 2, 4, 7, 9, 11]: for w in ["weight", "bias"]: compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}" ] compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}" ] compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}" ] compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}" ] compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}" ] compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}" ] compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}" ] compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}" ] std_idx += 1 compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"] compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"] if args.vocab_transform: for w in ["weight", "bias"]: compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"] compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"] 
print(f"N layers selected for distillation: {std_idx}") print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}") print(f"Save transferred checkpoint to {args.dump_checkpoint}.") torch.save(compressed_sd, args.dump_checkpoint)
transformers/examples/research_projects/distillation/scripts/extract_distilbert.py/0
{ "file_path": "transformers/examples/research_projects/distillation/scripts/extract_distilbert.py", "repo_id": "transformers", "token_count": 1865 }
# Information Gain Filtration (IGF)

Authors: @Tuko @mraunak

This folder contains code showing how to implement IGF for fine-tuning GPT-2.

## What is IGF?

Here we present a general fine-tuning method that we call information gain filtration, for improving the overall training efficiency and final performance of language model fine-tuning (see the paper below). It is an alternative fine-tuning approach that trains a secondary model (e.g., a simple convolutional network) to predict the amount of information gained over a given pre-trained model. The secondary model is lightweight and trained to predict the Information Gain measure. Information Gain is defined as the change in a loss function for a model before and after an SGD update with a sample (Equation X in the paper); a minimal illustrative sketch is given at the end of this README. A small subset of the training set, named the “objective” set, is used to measure information gain on the pre-trained model, and consequently to train the secondary model. After training, the secondary model is used to filter samples during the fine-tuning process. Therefore, a high information gain value suggests a sample is informative, whereas a low value suggests a non-informative sample that should be filtered out. Thus, a thresholding strategy is defined to select informative samples. With such a strategy, samples are filtered, and once enough samples are selected to form a mini-batch, a usual fine-tuning/optimization step is applied. The filtration process is repeated until the fine-tuning process is over.

Paper: [Selecting Informative Contexts Improves Language Model Finetuning](https://arxiv.org/abs/2005.00175)

## Results

Several experiments were conducted to show the robustness of the IGF method versus the standard fine-tuning process. For example, we achieve a median perplexity of 54.0 on the Books dataset, compared to 57.3 for standard fine-tuning on GPT-2 Small. The code was implemented using the Transformers library and PyTorch. While the method may seem more expensive, we saw enough evidence that it may lead to a performance benefit in the final models.

![IGF performance](result_igf.png)

Figure 1: Comparing IGF to standard fine-tuning: IGF with constant (p < 10^-3, t-test) and shifting (p < 10^-6, t-test) thresholding significantly outperforms standard fine-tuning. The left-hand figure shows test-set perplexity after each fine-tuning batch, averaged over 50 runs (error bars denote ± one standard error). The right-hand figure shows the perplexity of each method after 60 batches. IGF with shifting thresholding (red) clearly improves over standard batched fine-tuning with Adam.

## How to use this project?

To fine-tune a transformer model with IGF on a language modeling task, use the `run_clm_igf.py` script with the following arguments:

- `model_name_or_path`: Path to pretrained model or model identifier from huggingface.co/models
- `data_file`: A `.jbl` file containing tokenized data which can be split into the objective dataset, train dataset and test dataset
- `igf_data_file`: A `.jbl` file containing the context and information gain pairs used to train the secondary learner
- `context_len`: The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.
- `size_objective_set`: Number of articles that are long enough to be used as our objective set
- `min_len`: The minimum length of an article to be used in the objective set
- `trim`: Truncate the example if it exceeds the context length
- `eval_freq`: Secondary model evaluation is triggered every `eval_freq` steps
- `max_steps`: Used to calculate the number of training epochs
- `number`: The number of examples split off to be used as objective_set/test_data
- `secondary_learner_batch_size`: The batch size of training data for the secondary learner
- `secondary_learner_max_epochs`: The number of epochs to train the secondary learner
- `recopy_model`: Reset the model to the original pretrained GPT-2 weights after each iteration
- `eval_interval`: Decay the selectivity of our secondary learner filter from 1 standard deviation above average to 1 below average after `eval_interval` (10) batches

```bash
python run_clm_igf.py \
--model_name_or_path "openai-community/gpt2" \
--data_file="data/tokenized_stories_train_wikitext103" \
--igf_data_file="data/IGF_values" \
--context_len 32 \
--size_objective_set 100 \
--min_len 1026 \
--trim True \
--eval_freq 100 \
--max_steps 1000 \
--secondary_learner_batch_size 128 \
--secondary_learner_max_epochs 15 \
--number 100 \
--recopy_model \
--eval_interval 10
```

## Citation

If you find the resource useful, please cite the following paper:

```bibtex
@inproceedings{antonello-etal-2021-selecting,
    title = "Selecting Informative Contexts Improves Language Model Fine-tuning",
    author = "Antonello, Richard and Beckage, Nicole and Turek, Javier and Huth, Alexander",
    booktitle = "Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers)",
    month = aug,
    year = "2021",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2021.acl-long.87",
    doi = "10.18653/v1/2021.acl-long.87",
    pages = "1072--1085",
}
```
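## Sketch of the filtering loop

For readers who want the shape of the filtering loop without stepping through `run_clm_igf.py`, the sketch below illustrates the core idea. It is a minimal, self-contained toy: the class name `ToySecondaryLearner`, the random `candidate_stream`, the constant `threshold`, and the batch handling are illustrative assumptions, not the script's actual API or hyperparameters.

```python
import torch
from torch import nn

context_len, vocab_size = 32, 50257


class ToySecondaryLearner(nn.Module):
    """Stand-in for the lightweight model that predicts information gain for a context."""

    def __init__(self):
        super().__init__()
        self.embed = nn.Embedding(vocab_size, 16)
        self.head = nn.Linear(16, 1)

    def forward(self, input_ids):  # input_ids: (batch, context_len)
        return self.head(self.embed(input_ids).mean(dim=1)).squeeze(-1)


secondary_learner = ToySecondaryLearner().eval()

# Stream of candidate contexts (random token ids standing in for real tokenized data).
candidate_stream = (torch.randint(0, vocab_size, (context_len,)) for _ in range(1000))

threshold = 0.0  # constant threshold; the "shifting" variant decays this over time
batch_size = 8
selected = []

for context in candidate_stream:
    with torch.no_grad():
        predicted_ig = secondary_learner(context.unsqueeze(0)).item()
    if predicted_ig < threshold:
        continue  # filter out contexts predicted to be uninformative
    selected.append(context)
    if len(selected) == batch_size:
        batch = torch.stack(selected)  # (batch_size, context_len) mini-batch
        # ...one standard fine-tuning/optimization step of GPT-2 on `batch` goes here...
        selected = []
```

In the actual script, the threshold is either kept constant or decayed over time (shifting thresholding, controlled via `--eval_interval`), and `--recopy_model` resets the model to the original pretrained GPT-2 weights between iterations.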
transformers/examples/research_projects/information-gain-filtration/README.md/0
{ "file_path": "transformers/examples/research_projects/information-gain-filtration/README.md", "repo_id": "transformers", "token_count": 1461 }
#!/usr/bin/env python # coding=utf-8 # Copyright 2021 The HuggingFace Team All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-tuning the library models for masked language modeling (BERT, ALBERT, RoBERTa...) with whole word masking on a text file or a dataset. Here is the full list of checkpoints on the hub that can be fine-tuned by this script: https://huggingface.co/models?filter=fill-mask """ import logging import os import sys import time from collections import defaultdict from dataclasses import dataclass, field # You can also adapt this script on your own masked language modeling task. Pointers for this are left as comments. from pathlib import Path from typing import Dict, List, Optional, Tuple import datasets import flax import jax import jax.numpy as jnp import numpy as np import optax from datasets import load_dataset from flax import jax_utils, traverse_util from flax.training import train_state from flax.training.common_utils import get_metrics, onehot, shard from tqdm import tqdm from transformers import ( CONFIG_MAPPING, FLAX_MODEL_FOR_MASKED_LM_MAPPING, AutoConfig, AutoTokenizer, FlaxAutoModelForMaskedLM, HfArgumentParser, PreTrainedTokenizerBase, TensorType, TrainingArguments, is_tensorboard_available, set_seed, ) if datasets.__version__ <= "1.8.0": raise ValueError("Make sure to upgrade `datasets` to a version >= 1.9.0 to use dataset streaming") MODEL_CONFIG_CLASSES = list(FLAX_MODEL_FOR_MASKED_LM_MAPPING.keys()) MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch. """ model_name_or_path: Optional[str] = field( default=None, metadata={ "help": ( "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch." ) }, ) model_type: Optional[str] = field( default=None, metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)}, ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} ) use_fast_tokenizer: bool = field( default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, ) dtype: Optional[str] = field( default="float32", metadata={ "help": ( "Floating-point format in which the model weights should be initialized and trained. Choose one of" " `[float32, float16, bfloat16]`." ) }, ) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. 
""" dataset_name: Optional[str] = field( default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."}) validation_file: Optional[str] = field( default=None, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, ) train_ref_file: Optional[str] = field( default=None, metadata={"help": "An optional input train ref data file for whole word masking in Chinese."}, ) validation_ref_file: Optional[str] = field( default=None, metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."}, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) validation_split_percentage: Optional[int] = field( default=5, metadata={ "help": "The percentage of the train set used as validation set in case there's no validation split" }, ) max_seq_length: Optional[int] = field( default=None, metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated. Default to the max input length of the model." ) }, ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) mlm_probability: float = field( default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} ) pad_to_max_length: bool = field( default=False, metadata={ "help": ( "Whether to pad all samples to `max_seq_length`. " "If False, will pad the samples dynamically when batching to the maximum length in the batch." ) }, ) line_by_line: bool = field( default=False, metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."}, ) text_column_name: str = field( default="text", metadata={"help": "The name of the column to retrieve the training text."} ) shuffle_buffer_size: int = field( default=10000, metadata={"help": "The number of examples to pre-load for shuffling."} ) num_train_steps: int = field(default=50000, metadata={"help": "The number of training steps."}) num_eval_samples: int = field(default=50000, metadata={"help": "The number of samples to be used for evaluation"}) def __post_init__(self): if self.dataset_name is None and self.train_file is None and self.validation_file is None: raise ValueError("Need either a dataset name or a training/validation file.") else: if self.train_file is not None: extension = self.train_file.split(".")[-1] assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file." if self.validation_file is not None: extension = self.validation_file.split(".")[-1] assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file." @flax.struct.dataclass class FlaxDataCollatorForLanguageModeling: """ Data collator used for language modeling. Inputs are dynamically padded to the maximum length of a batch if they are not all of the same length. Args: tokenizer (:class:`~transformers.PreTrainedTokenizer` or :class:`~transformers.PreTrainedTokenizerFast`): The tokenizer used for encoding the data. 
mlm_probability (:obj:`float`, `optional`, defaults to 0.15): The probability with which to (randomly) mask tokens in the input. .. note:: For best performance, this data collator should be used with a dataset having items that are dictionaries or BatchEncoding, with the :obj:`"special_tokens_mask"` key, as returned by a :class:`~transformers.PreTrainedTokenizer` or a :class:`~transformers.PreTrainedTokenizerFast` with the argument :obj:`return_special_tokens_mask=True`. """ tokenizer: PreTrainedTokenizerBase mlm_probability: float = 0.15 def __post_init__(self): if self.tokenizer.mask_token is None: raise ValueError( "This tokenizer does not have a mask token which is necessary for masked language modeling. " "You should pass `mlm=False` to train on causal language modeling instead." ) def __call__(self, examples: List[Dict[str, np.ndarray]]) -> Dict[str, np.ndarray]: # Handle dict or lists with proper padding and conversion to tensor. batch = self.tokenizer.pad(examples, return_tensors=TensorType.NUMPY) # If special token mask has been preprocessed, pop it from the dict. special_tokens_mask = batch.pop("special_tokens_mask", None) batch["input_ids"], batch["labels"] = self.mask_tokens( batch["input_ids"], special_tokens_mask=special_tokens_mask ) return batch def mask_tokens( self, inputs: np.ndarray, special_tokens_mask: Optional[np.ndarray] ) -> Tuple[jnp.ndarray, jnp.ndarray]: """ Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. """ labels = inputs.copy() # We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`) probability_matrix = np.full(labels.shape, self.mlm_probability) special_tokens_mask = special_tokens_mask.astype("bool") probability_matrix[special_tokens_mask] = 0.0 masked_indices = np.random.binomial(1, probability_matrix).astype("bool") labels[~masked_indices] = -100 # We only compute loss on masked tokens # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK]) indices_replaced = np.random.binomial(1, np.full(labels.shape, 0.8)).astype("bool") & masked_indices inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token) # 10% of the time, we replace masked input tokens with random word indices_random = np.random.binomial(1, np.full(labels.shape, 0.5)).astype("bool") indices_random &= masked_indices & ~indices_replaced random_words = np.random.randint(self.tokenizer.vocab_size, size=labels.shape, dtype="i4") inputs[indices_random] = random_words[indices_random] # The rest of the time (10% of the time) we keep the masked input tokens unchanged return inputs, labels def generate_batch_splits(samples_idx: np.ndarray, batch_size: int) -> np.ndarray: num_samples = len(samples_idx) samples_to_remove = num_samples % batch_size if samples_to_remove != 0: samples_idx = samples_idx[:-samples_to_remove] sections_split = num_samples // batch_size batch_idx = np.split(samples_idx, sections_split) return batch_idx def advance_iter_and_group_samples(train_iterator, num_samples, max_seq_length): """ The training iterator is advanced so that after groupifying the samples, `num_samples` of length `max_seq_length` are returned. 
""" num_total_tokens = max_seq_length * num_samples samples = defaultdict(list) i = 0 while i < num_total_tokens: tokenized_samples = next(train_iterator) i += len(tokenized_samples["input_ids"]) # concatenate tokenized samples to list (excluding "id" and "text") samples = { k: samples[k] + tokenized_samples[k] for k in ["input_ids", "attention_mask", "special_tokens_mask"] } # Concatenated tokens are split to lists of length `max_seq_length`. # Note that remainedr of % max_seq_length are thrown away. def group_texts(examples): result = { k: [t[i : i + max_seq_length] for i in range(0, num_total_tokens, max_seq_length)] for k, t in examples.items() } return result grouped_samples = group_texts(samples) return grouped_samples def write_train_metric(summary_writer, train_metrics, train_time, step): summary_writer.scalar("train_time", train_time, step) train_metrics = get_metrics(train_metrics) for key, vals in train_metrics.items(): tag = f"train_{key}" for i, val in enumerate(vals): summary_writer.scalar(tag, val, step - len(vals) + i + 1) def write_eval_metric(summary_writer, eval_metrics, step): for metric_name, value in eval_metrics.items(): summary_writer.scalar(f"eval_{metric_name}", value, step) if __name__ == "__main__": # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir) and os.listdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", level="INFO", datefmt="[%X]", ) # Log on each process the small summary: logger = logging.getLogger(__name__) logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" ) # Set the verbosity to info of the Transformers logger (on main process only): logger.info(f"Training/evaluation parameters {training_args}") # Set seed before initializing model. set_seed(training_args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. 
dataset = load_dataset( data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, streaming=True, split="train", ) if model_args.config_name: config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir) elif model_args.model_name_or_path: config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir) else: config = CONFIG_MAPPING[model_args.model_type]() logger.warning("You are instantiating a new config instance from scratch.") if model_args.tokenizer_name: tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer ) elif model_args.model_name_or_path: tokenizer = AutoTokenizer.from_pretrained( model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer ) else: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script. " "You can do it from another script, save it, and load it from here, using --tokenizer_name." ) # Otherwise, we tokenize every text, then concatenate them together before splitting them in smaller parts. # We use `return_special_tokens_mask=True` because DataCollatorForLanguageModeling (see below) is more # efficient when it receives the `special_tokens_mask`. def tokenize_function(examples): return tokenizer(examples[data_args.text_column_name], return_special_tokens_mask=True) tokenized_datasets = dataset.map(tokenize_function, batched=True, remove_columns=list(dataset.features.keys())) shuffle_seed = training_args.seed tokenized_datasets = tokenized_datasets.shuffle(buffer_size=data_args.shuffle_buffer_size, seed=shuffle_seed) has_tensorboard = is_tensorboard_available() if has_tensorboard and jax.process_index() == 0: try: from flax.metrics.tensorboard import SummaryWriter except ImportError as ie: has_tensorboard = False logger.warning( f"Unable to display metrics through TensorBoard because some package are not installed: {ie}" ) summary_writer = SummaryWriter(log_dir=Path(training_args.output_dir)) # Data collator # This one will take care of randomly masking the tokens. 
data_collator = FlaxDataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability) # Initialize our training rng = jax.random.PRNGKey(training_args.seed) dropout_rngs = jax.random.split(rng, jax.local_device_count()) if model_args.model_name_or_path: model = FlaxAutoModelForMaskedLM.from_pretrained( model_args.model_name_or_path, config=config, seed=training_args.seed, dtype=getattr(jnp, model_args.dtype) ) else: model = FlaxAutoModelForMaskedLM.from_config( config, seed=training_args.seed, dtype=getattr(jnp, model_args.dtype) ) # Store some constant num_epochs = int(training_args.num_train_epochs) train_batch_size = int(training_args.per_device_train_batch_size) * jax.device_count() eval_batch_size = int(training_args.per_device_eval_batch_size) * jax.device_count() # define number steps per stream epoch num_train_steps = data_args.num_train_steps # Create learning rate schedule warmup_fn = optax.linear_schedule( init_value=0.0, end_value=training_args.learning_rate, transition_steps=training_args.warmup_steps ) decay_fn = optax.linear_schedule( init_value=training_args.learning_rate, end_value=0, transition_steps=num_train_steps - training_args.warmup_steps, ) linear_decay_lr_schedule_fn = optax.join_schedules( schedules=[warmup_fn, decay_fn], boundaries=[training_args.warmup_steps] ) # We use Optax's "masking" functionality to not apply weight decay # to bias and LayerNorm scale parameters. decay_mask_fn returns a # mask boolean with the same structure as the parameters. # The mask is True for parameters that should be decayed. # Note that this mask is specifically adapted for FlaxBERT-like models. # For other models, one should correct the layer norm parameter naming # accordingly. def decay_mask_fn(params): flat_params = traverse_util.flatten_dict(params) flat_mask = {path: (path[-1] != "bias" and path[-2:] != ("LayerNorm", "scale")) for path in flat_params} return traverse_util.unflatten_dict(flat_mask) # create adam optimizer adamw = optax.adamw( learning_rate=linear_decay_lr_schedule_fn, b1=training_args.adam_beta1, b2=training_args.adam_beta2, eps=training_args.adam_epsilon, weight_decay=training_args.weight_decay, mask=decay_mask_fn, ) # Setup train state state = train_state.TrainState.create(apply_fn=model.__call__, params=model.params, tx=adamw) # Define gradient update step fn def train_step(state, batch, dropout_rng): dropout_rng, new_dropout_rng = jax.random.split(dropout_rng) def loss_fn(params): labels = batch.pop("labels") logits = state.apply_fn(**batch, params=params, dropout_rng=dropout_rng, train=True)[0] # compute loss, ignore padded input tokens label_mask = jnp.where(labels > 0, 1.0, 0.0) loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])) * label_mask # take average loss = loss.sum() / label_mask.sum() return loss grad_fn = jax.value_and_grad(loss_fn) loss, grad = grad_fn(state.params) grad = jax.lax.pmean(grad, "batch") new_state = state.apply_gradients(grads=grad) metrics = jax.lax.pmean( {"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(state.step)}, axis_name="batch" ) return new_state, metrics, new_dropout_rng # Create parallel version of the train step p_train_step = jax.pmap(train_step, "batch", donate_argnums=(0,)) # Define eval fn def eval_step(params, batch): labels = batch.pop("labels") logits = model(**batch, params=params, train=False)[0] # compute loss, ignore padded input tokens label_mask = jnp.where(labels > 0, 1.0, 0.0) loss = optax.softmax_cross_entropy(logits, onehot(labels, 
logits.shape[-1])) * label_mask # compute accuracy accuracy = jnp.equal(jnp.argmax(logits, axis=-1), labels) * label_mask # summarize metrics metrics = {"loss": loss.sum(), "accuracy": accuracy.sum(), "normalizer": label_mask.sum()} metrics = jax.lax.psum(metrics, axis_name="batch") return metrics p_eval_step = jax.pmap(eval_step, "batch", donate_argnums=(0,)) # Replicate the train state on each device state = jax_utils.replicate(state) train_time = 0 train_start = time.time() train_metrics = [] eval_metrics = [] training_iter = iter(tokenized_datasets) max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length) eval_samples = advance_iter_and_group_samples(training_iter, data_args.num_eval_samples, max_seq_length) steps = tqdm(range(num_train_steps), desc="Training...", position=0) for step in range(num_train_steps): # ======================== Training ================================ try: samples = advance_iter_and_group_samples(training_iter, train_batch_size, max_seq_length) except StopIteration: # Once the end of the dataset stream is reached, the training iterator # is reinitialized and reshuffled and a new eval dataset is randomly chosen. shuffle_seed += 1 tokenized_datasets.set_epoch(shuffle_seed) training_iter = iter(tokenized_datasets) eval_dataset = advance_iter_and_group_samples(training_iter, data_args.num_eval_samples, max_seq_length) samples = advance_iter_and_group_samples(training_iter, train_batch_size, max_seq_length) # process input samples model_inputs = data_collator(samples) # Model forward model_inputs = shard(model_inputs.data) state, train_metric, dropout_rngs = p_train_step(state, model_inputs, dropout_rngs) train_metrics.append(train_metric) if step % training_args.logging_steps == 0 and step > 0: steps.write( f"Step... ({step} | Loss: {train_metric['loss'].mean()}, Learning Rate:" f" {train_metric['learning_rate'].mean()})" ) train_time += time.time() - train_start if has_tensorboard and jax.process_index() == 0: write_train_metric(summary_writer, train_metrics, train_time, step) train_metrics = [] # ======================== Evaluating ============================== if step % training_args.eval_steps == 0 and step > 0: # Avoid using jax.numpy here in case of TPU training eval_samples_idx = np.arange(data_args.num_eval_samples) eval_batch_idx = generate_batch_splits(eval_samples_idx, eval_batch_size) for i, batch_idx in enumerate(tqdm(eval_batch_idx, desc="Evaluating ...", position=1)): # process input samples batch_eval_samples = {k: [v[idx] for idx in batch_idx] for k, v in eval_samples.items()} model_inputs = data_collator(batch_eval_samples) # Model forward model_inputs = shard(model_inputs.data) metrics = p_eval_step(state.params, model_inputs) eval_metrics.append(metrics) # normalize eval metrics eval_metrics = get_metrics(eval_metrics) eval_metrics = jax.tree_util.tree_map(jnp.sum, eval_metrics) eval_normalizer = eval_metrics.pop("normalizer") eval_metrics = jax.tree_util.tree_map(lambda x: x / eval_normalizer, eval_metrics) # Update progress bar steps.desc = ( f"Step... 
({step + 1}/{num_train_steps} | Loss: {eval_metrics['loss']}, Acc:" f" {eval_metrics['accuracy']})" ) if has_tensorboard and jax.process_index() == 0: write_eval_metric(summary_writer, eval_metrics, step) eval_metrics = [] # save checkpoint after each epoch and push checkpoint to the hub if jax.process_index() == 0: params = jax.device_get(jax.tree_util.tree_map(lambda x: x[0], state.params)) model.save_pretrained( training_args.output_dir, params=params, push_to_hub=training_args.push_to_hub, commit_message=f"Saving weights and logs of step {step+1}", ) # update tqdm bar steps.update(1)
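# ---------------------------------------------------------------------------
# Illustrative usage (not part of the original script): a sketch of how this
# streaming MLM script is typically launched. The dataset, tokenizer, and
# hyperparameter values are placeholders, not a recommended recipe; the flags
# themselves are the ones defined by the dataclasses above and by
# `TrainingArguments`.
#
#   python run_mlm_flax_stream.py \
#       --config_name="roberta-base" \
#       --tokenizer_name="roberta-base" \
#       --dataset_name="oscar" \
#       --dataset_config_name="unshuffled_deduplicated_en" \
#       --max_seq_length="128" \
#       --per_device_train_batch_size="32" \
#       --num_train_steps="10000" \
#       --num_eval_samples="5000" \
#       --learning_rate="3e-4" \
#       --warmup_steps="1000" \
#       --logging_steps="250" \
#       --eval_steps="1000" \
#       --output_dir="./output" \
#       --do_train
# ---------------------------------------------------------------------------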
transformers/examples/research_projects/jax-projects/dataset-streaming/run_mlm_flax_stream.py/0
{ "file_path": "transformers/examples/research_projects/jax-projects/dataset-streaming/run_mlm_flax_stream.py", "repo_id": "transformers", "token_count": 10618 }
import functools import math import os # noqa: F401 from random import choice, randint from time import time import datasets # noqa: F401 import faiss # noqa: F401 import numpy as np import pandas as pd import torch import torch.utils.checkpoint as checkpoint from elasticsearch import Elasticsearch # noqa: F401 from elasticsearch.helpers import bulk, streaming_bulk # noqa: F401 from torch import nn from torch.utils.data import DataLoader, Dataset, RandomSampler, SequentialSampler from tqdm import tqdm from transformers import AdamW, AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer, get_linear_schedule_with_warmup pd.set_option("display.max_colwidth", None) ############### # Sparse index ############### def make_es_index_snippets(es_client, passages_dset, index_name="english_wiki_kilt_snippets_100w"): index_config = { "settings": { "number_of_shards": 1, "analysis": {"analyzer": {"stop_standard": {"type": "standard", " stopwords": "_english_"}}}, }, "mappings": { "properties": { "article_title": {"type": "text", "analyzer": "standard", "similarity": "BM25"}, "section_title": {"type": "text", "analyzer": "standard", "similarity": "BM25"}, "passage_text": {"type": "text", "analyzer": "standard", "similarity": "BM25"}, } }, } es_client.indices.create(index=index_name, body=index_config) number_of_docs = passages_dset.num_rows progress = tqdm(unit="docs", total=number_of_docs) successes = 0 def passage_generator(): for passage in passages_dset: yield passage # create the ES index for ok, action in streaming_bulk( client=es_client, index=index_name, actions=passage_generator(), ): progress.update(1) successes += ok print("Indexed %d documents" % (successes,)) def query_es_index(question, es_client, index_name="english_wiki_kilt_snippets_100w", n_results=10, min_length=20): q = question.lower() banned = ["how", "why", "what", "where", "which", "do", "does", "is", "?", "eli5", "eli5:"] q = " ".join([w for w in q.split() if w not in banned]) response = es_client.search( index=index_name, body={ "query": { "multi_match": { "query": q, "fields": ["article_title", "section_title", "passage_text^2"], "type": "cross_fields", } }, "size": 2 * n_results, }, ) hits = response["hits"]["hits"] support_doc = "<P> " + " <P> ".join([hit["_source"]["passage_text"] for hit in hits]) res_list = [{k: hit["_source"][k] for k in hit["_source"] if k != "passage_text"} for hit in hits] for r, hit in zip(res_list, hits): r["passage_id"] = hit["_id"] r["score"] = hit["_score"] r["passage_text"] = hit["_source"]["passage_text"] res_list = [res for res in res_list if len(res["passage_text"].split()) > min_length][:n_results] return support_doc, res_list ############### # ELI5 retriever training ############### class ELI5DatasetQARetriver(Dataset): def __init__(self, examples_array, extra_answer_threshold=3, min_answer_length=64, training=True, n_samples=None): self.data = examples_array self.answer_thres = extra_answer_threshold self.min_length = min_answer_length self.training = training self.n_samples = self.data.num_rows if n_samples is None else n_samples def __len__(self): return self.n_samples def make_example(self, idx): example = self.data[idx] question = example["title"] if self.training: answers = [a for i, (a, sc) in enumerate(zip(example["answers"]["text"], example["answers"]["score"]))] answer_tab = choice(answers).split(" ") start_idx = randint(0, max(0, len(answer_tab) - self.min_length)) answer_span = " ".join(answer_tab[start_idx:]) else: answer_span = example["answers"]["text"][0] return (question, 
answer_span) def __getitem__(self, idx): return self.make_example(idx % self.data.num_rows) class RetrievalQAEmbedder(nn.Module): def __init__(self, sent_encoder, dim): super(RetrievalQAEmbedder, self).__init__() self.sent_encoder = sent_encoder self.output_dim = 128 self.project_q = nn.Linear(dim, self.output_dim, bias=False) self.project_a = nn.Linear(dim, self.output_dim, bias=False) self.ce_loss = nn.CrossEntropyLoss(reduction="mean") def embed_sentences_checkpointed(self, input_ids, attention_mask, checkpoint_batch_size=-1): # reproduces BERT forward pass with checkpointing if checkpoint_batch_size < 0 or input_ids.shape[0] < checkpoint_batch_size: return self.sent_encoder(input_ids, attention_mask=attention_mask)[1] else: # prepare implicit variables device = input_ids.device input_shape = input_ids.size() token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) head_mask = [None] * self.sent_encoder.config.num_hidden_layers extended_attention_mask: torch.Tensor = self.sent_encoder.get_extended_attention_mask( attention_mask, input_shape ) # define function for checkpointing def partial_encode(*inputs): encoder_outputs = self.sent_encoder.encoder( inputs[0], attention_mask=inputs[1], head_mask=head_mask, ) sequence_output = encoder_outputs[0] pooled_output = self.sent_encoder.pooler(sequence_output) return pooled_output # run embedding layer on everything at once embedding_output = self.sent_encoder.embeddings( input_ids=input_ids, position_ids=None, token_type_ids=token_type_ids, inputs_embeds=None ) # run encoding and pooling on one mini-batch at a time pooled_output_list = [] for b in range(math.ceil(input_ids.shape[0] / checkpoint_batch_size)): b_embedding_output = embedding_output[b * checkpoint_batch_size : (b + 1) * checkpoint_batch_size] b_attention_mask = extended_attention_mask[b * checkpoint_batch_size : (b + 1) * checkpoint_batch_size] pooled_output = checkpoint.checkpoint(partial_encode, b_embedding_output, b_attention_mask) pooled_output_list.append(pooled_output) return torch.cat(pooled_output_list, dim=0) def embed_questions(self, q_ids, q_mask, checkpoint_batch_size=-1): q_reps = self.embed_sentences_checkpointed(q_ids, q_mask, checkpoint_batch_size) return self.project_q(q_reps) def embed_answers(self, a_ids, a_mask, checkpoint_batch_size=-1): a_reps = self.embed_sentences_checkpointed(a_ids, a_mask, checkpoint_batch_size) return self.project_a(a_reps) def forward(self, q_ids, q_mask, a_ids, a_mask, checkpoint_batch_size=-1): device = q_ids.device q_reps = self.embed_questions(q_ids, q_mask, checkpoint_batch_size) a_reps = self.embed_answers(a_ids, a_mask, checkpoint_batch_size) compare_scores = torch.mm(q_reps, a_reps.t()) loss_qa = self.ce_loss(compare_scores, torch.arange(compare_scores.shape[1]).to(device)) loss_aq = self.ce_loss(compare_scores.t(), torch.arange(compare_scores.shape[0]).to(device)) loss = (loss_qa + loss_aq) / 2 return loss def make_qa_retriever_model(model_name="google/bert_uncased_L-8_H-512_A-8", from_file=None, device="cuda:0"): tokenizer = AutoTokenizer.from_pretrained(model_name) bert_model = AutoModel.from_pretrained(model_name).to(device) # run bert_model on a dummy batch to get output dimension d_ids = torch.LongTensor( [[bert_model.config.bos_token_id if bert_model.config.bos_token_id is not None else 1]] ).to(device) d_mask = torch.LongTensor([[1]]).to(device) sent_dim = bert_model(d_ids, attention_mask=d_mask)[1].shape[-1] qa_embedder = RetrievalQAEmbedder(bert_model, sent_dim).to(device) if from_file is not None: 
param_dict = torch.load(from_file) # has model weights, optimizer, and scheduler states qa_embedder.load_state_dict(param_dict["model"]) return tokenizer, qa_embedder def make_qa_retriever_batch(qa_list, tokenizer, max_len=64, device="cuda:0"): q_ls = [q for q, a in qa_list] a_ls = [a for q, a in qa_list] q_toks = tokenizer(q_ls, max_length=max_len, padding="max_length", truncation=True) q_ids, q_mask = ( torch.LongTensor(q_toks["input_ids"]).to(device), torch.LongTensor(q_toks["attention_mask"]).to(device), ) a_toks = tokenizer(a_ls, max_length=max_len, padding="max_length", truncation=True) a_ids, a_mask = ( torch.LongTensor(a_toks["input_ids"]).to(device), torch.LongTensor(a_toks["attention_mask"]).to(device), ) return (q_ids, q_mask, a_ids, a_mask) def train_qa_retriever_epoch(model, dataset, tokenizer, optimizer, scheduler, args, e=0): model.train() # make iterator train_sampler = RandomSampler(dataset) model_collate_fn = functools.partial( make_qa_retriever_batch, tokenizer=tokenizer, max_len=args.max_length, device="cuda:0" ) data_loader = DataLoader(dataset, batch_size=args.batch_size, sampler=train_sampler, collate_fn=model_collate_fn) epoch_iterator = tqdm(data_loader, desc="Iteration", disable=True) # accumulate loss since last print loc_steps = 0 loc_loss = 0.0 st_time = time() for step, batch in enumerate(epoch_iterator): q_ids, q_mask, a_ids, a_mask = batch pre_loss = model(q_ids, q_mask, a_ids, a_mask, checkpoint_batch_size=args.checkpoint_batch_size) loss = pre_loss.sum() # optimizer loss.backward() optimizer.step() scheduler.step() model.zero_grad() # some printing within the epoch loc_loss += loss.item() loc_steps += 1 if step % args.print_freq == 0 or step == 1: print( "{:2d} {:5d} of {:5d} \t L: {:.3f} \t -- {:.3f}".format( e, step, len(dataset) // args.batch_size, loc_loss / loc_steps, time() - st_time, ) ) loc_loss = 0 loc_steps = 0 def train_qa_retriever_joint_epoch(model, dataset_list, tokenizer, optimizer, scheduler, args, e=0): model.train() model_collate_fn = functools.partial( make_qa_retriever_batch, tokenizer=tokenizer, max_len=args.max_length, device="cuda:0" ) # make iterator train_samplers = [RandomSampler(dataset) for dataset in dataset_list] data_loaders = [ DataLoader(dataset, batch_size=args.batch_size, sampler=train_sampler, collate_fn=model_collate_fn) for dataset, train_sampler in zip(dataset_list, train_samplers) ] iterators = [iter(dloader) for dloader in data_loaders] joint_iter = zip(*iterators) # accumulate loss since last print loc_steps = 0 loc_loss = 0.0 st_time = time() for step, (batches,) in enumerate(zip(joint_iter)): for batch in batches: q_ids, q_mask, a_ids, a_mask = batch loss = model(q_ids, q_mask, a_ids, a_mask, checkpoint_batch_size=args.checkpoint_batch_size) # optimizer loss.backward() optimizer.step() scheduler.step() model.zero_grad() # some printing within the epoch loc_loss += loss.item() loc_steps += 1 if step % args.print_freq == 0: print( "{:2d} {:5d} of {:5d} \t L: {:.3f} \t -- {:.3f}".format( e, step, len(dataset_list[0]) // args.batch_size, loc_loss / loc_steps, time() - st_time, ) ) loc_loss = 0 loc_steps = 0 def evaluate_qa_retriever(model, dataset, tokenizer, args): model.eval() # make iterator eval_sampler = SequentialSampler(dataset) model_collate_fn = functools.partial( make_qa_retriever_batch, tokenizer=tokenizer, max_len=args.max_length, device="cuda:0" ) data_loader = DataLoader(dataset, batch_size=args.batch_size, sampler=eval_sampler, collate_fn=model_collate_fn) epoch_iterator = tqdm(data_loader, 
desc="Iteration", disable=True) tot_loss = 0.0 with torch.no_grad(): for step, batch in enumerate(epoch_iterator): q_ids, q_mask, a_ids, a_mask = batch loss = model(q_ids, q_mask, a_ids, a_mask) tot_loss += loss.item() return tot_loss / (step + 1) def train_qa_retriever(qar_model, qar_tokenizer, qar_train_dset, qar_valid_dset, qar_args): qar_optimizer = AdamW(qar_model.parameters(), lr=qar_args.learning_rate, eps=1e-8) qar_scheduler = get_linear_schedule_with_warmup( qar_optimizer, num_warmup_steps=100, num_training_steps=(qar_args.num_epochs + 1) * math.ceil(len(qar_train_dset) / qar_args.batch_size), ) for e in range(qar_args.num_epochs): train_qa_retriever_epoch(qar_model, qar_train_dset, qar_tokenizer, qar_optimizer, qar_scheduler, qar_args, e) m_save_dict = { "model": qar_model.state_dict(), "optimizer": qar_optimizer.state_dict(), "scheduler": qar_scheduler.state_dict(), } print("Saving model {}".format(qar_args.model_save_name)) torch.save(m_save_dict, "{}_{}.pth".format(qar_args.model_save_name, e)) eval_loss = evaluate_qa_retriever(qar_model, qar_valid_dset, qar_tokenizer, qar_args) print("Evaluation loss epoch {:4d}: {:.3f}".format(e, eval_loss)) ############### # ELI5 seq2seq model training ############### class ELI5DatasetS2S(Dataset): def __init__( self, examples_array, make_doc_fun=None, extra_answer_threshold=3, document_cache=None, training=True ): self.training = training self.data = examples_array self.make_doc_function = make_doc_fun self.document_cache = {} if document_cache is None else document_cache assert not (make_doc_fun is None and document_cache is None) # make index of specific question-answer pairs from multi-answers if self.training: self.qa_id_list = [ (i, j) for i, qa in enumerate(self.data) for j, (a, sc) in enumerate(zip(qa["answers"]["text"], qa["answers"]["score"])) if j == 0 or sc >= extra_answer_threshold ] else: self.qa_id_list = [(i, 0) for i in range(self.data.num_rows)] def __len__(self): return len(self.qa_id_list) def make_example(self, idx): i, j = self.qa_id_list[idx] example = self.data[i] question = example["title"] + " " + example["selftext"] answer = example["answers"]["text"][j] q_id = example["q_id"] if self.make_doc_function is not None: self.document_cache[q_id] = self.document_cache.get(q_id, self.make_doc_function(example["title"])) document = self.document_cache[q_id] in_st = "question: {} context: {}".format( question.lower().replace(" --t--", "").strip(), document.lower().strip(), ) out_st = answer return (in_st, out_st) def __getitem__(self, idx): return self.make_example(idx) def make_qa_s2s_model(model_name="facebook/bart-large", from_file=None, device="cuda:0"): tokenizer = AutoTokenizer.from_pretrained(model_name) model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device) if from_file is not None: param_dict = torch.load(from_file) # has model weights, optimizer, and scheduler states model.load_state_dict(param_dict["model"]) return tokenizer, model def make_qa_s2s_batch(qa_list, tokenizer, max_len=64, max_a_len=360, device="cuda:0"): q_ls = [q for q, a in qa_list] a_ls = [a for q, a in qa_list] q_toks = tokenizer(q_ls, max_length=max_len, padding="max_length", truncation=True) q_ids, q_mask = ( torch.LongTensor(q_toks["input_ids"]).to(device), torch.LongTensor(q_toks["attention_mask"]).to(device), ) a_toks = tokenizer(a_ls, max_length=min(max_len, max_a_len), padding="max_length", truncation=True) a_ids, a_mask = ( torch.LongTensor(a_toks["input_ids"]).to(device), 
torch.LongTensor(a_toks["attention_mask"]).to(device), ) lm_labels = a_ids[:, 1:].contiguous().clone() lm_labels[a_mask[:, 1:].contiguous() == 0] = -100 model_inputs = { "input_ids": q_ids, "attention_mask": q_mask, "decoder_input_ids": a_ids[:, :-1].contiguous(), "lm_labels": lm_labels, } return model_inputs def train_qa_s2s_epoch(model, dataset, tokenizer, optimizer, scheduler, args, e=0, curriculum=False): model.train() # make iterator if curriculum: train_sampler = SequentialSampler(dataset) else: train_sampler = RandomSampler(dataset) model_collate_fn = functools.partial( make_qa_s2s_batch, tokenizer=tokenizer, max_len=args.max_length, device="cuda:0" ) data_loader = DataLoader(dataset, batch_size=args.batch_size, sampler=train_sampler, collate_fn=model_collate_fn) epoch_iterator = tqdm(data_loader, desc="Iteration", disable=True) # accumulate loss since last print loc_steps = 0 loc_loss = 0.0 st_time = time() for step, batch_inputs in enumerate(epoch_iterator): pre_loss = model(**batch_inputs)[0] loss = pre_loss.sum() / pre_loss.shape[0] loss.backward() # optimizer if step % args.backward_freq == 0: optimizer.step() scheduler.step() model.zero_grad() # some printing within the epoch loc_loss += loss.item() loc_steps += 1 if step % args.print_freq == 0 or step == 1: print( "{:2d} {:5d} of {:5d} \t L: {:.3f} \t -- {:.3f}".format( e, step, len(dataset) // args.batch_size, loc_loss / loc_steps, time() - st_time, ) ) loc_loss = 0 loc_steps = 0 def eval_qa_s2s_epoch(model, dataset, tokenizer, args): model.eval() # make iterator train_sampler = SequentialSampler(dataset) model_collate_fn = functools.partial( make_qa_s2s_batch, tokenizer=tokenizer, max_len=args.max_length, device="cuda:0" ) data_loader = DataLoader(dataset, batch_size=args.batch_size, sampler=train_sampler, collate_fn=model_collate_fn) epoch_iterator = tqdm(data_loader, desc="Iteration", disable=True) # accumulate loss since last print loc_steps = 0 loc_loss = 0.0 st_time = time() with torch.no_grad(): for step, batch_inputs in enumerate(epoch_iterator): pre_loss = model(**batch_inputs)[0] loss = pre_loss.sum() / pre_loss.shape[0] loc_loss += loss.item() loc_steps += 1 if step % args.print_freq == 0: print( "{:5d} of {:5d} \t L: {:.3f} \t -- {:.3f}".format( step, len(dataset) // args.batch_size, loc_loss / loc_steps, time() - st_time, ) ) print( "Total \t L: {:.3f} \t -- {:.3f}".format( loc_loss / loc_steps, time() - st_time, ) ) def train_qa_s2s(qa_s2s_model, qa_s2s_tokenizer, s2s_train_dset, s2s_valid_dset, s2s_args): s2s_optimizer = AdamW(qa_s2s_model.parameters(), lr=s2s_args.learning_rate, eps=1e-8) s2s_scheduler = get_linear_schedule_with_warmup( s2s_optimizer, num_warmup_steps=400, num_training_steps=(s2s_args.num_epochs + 1) * math.ceil(len(s2s_train_dset) / s2s_args.batch_size), ) for e in range(s2s_args.num_epochs): train_qa_s2s_epoch( qa_s2s_model, s2s_train_dset, qa_s2s_tokenizer, s2s_optimizer, s2s_scheduler, s2s_args, e, curriculum=(e == 0), ) m_save_dict = { "model": qa_s2s_model.state_dict(), "optimizer": s2s_optimizer.state_dict(), "scheduler": s2s_scheduler.state_dict(), } print("Saving model {}".format(s2s_args.model_save_name)) eval_qa_s2s_epoch(qa_s2s_model, s2s_valid_dset, qa_s2s_tokenizer, s2s_args) torch.save(m_save_dict, "{}_{}.pth".format(s2s_args.model_save_name, e)) # generate answer from input "question: ... context: <p> ..." 
def qa_s2s_generate( question_doc, qa_s2s_model, qa_s2s_tokenizer, num_answers=1, num_beams=None, min_len=64, max_len=256, do_sample=False, temp=1.0, top_p=None, top_k=None, max_input_length=512, device="cuda:0", ): model_inputs = make_qa_s2s_batch( [(question_doc, "A")], qa_s2s_tokenizer, max_input_length, device=device, ) n_beams = num_answers if num_beams is None else max(num_beams, num_answers) generated_ids = qa_s2s_model.generate( input_ids=model_inputs["input_ids"], attention_mask=model_inputs["attention_mask"], min_length=min_len, max_length=max_len, do_sample=do_sample, early_stopping=True, num_beams=1 if do_sample else n_beams, temperature=temp, top_k=top_k, top_p=top_p, eos_token_id=qa_s2s_tokenizer.eos_token_id, no_repeat_ngram_size=3, num_return_sequences=num_answers, decoder_start_token_id=qa_s2s_tokenizer.bos_token_id, ) return [qa_s2s_tokenizer.decode(ans_ids, skip_special_tokens=True).strip() for ans_ids in generated_ids] ############### # ELI5-trained retrieval model usage ############### def embed_passages_for_retrieval(passages, tokenizer, qa_embedder, max_length=128, device="cuda:0"): a_toks = tokenizer(passages, max_length=max_length, padding="max_length", truncation=True) a_ids, a_mask = ( torch.LongTensor(a_toks["input_ids"]).to(device), torch.LongTensor(a_toks["attention_mask"]).to(device), ) with torch.no_grad(): a_reps = qa_embedder.embed_answers(a_ids, a_mask).cpu().type(torch.float) return a_reps.numpy() def embed_questions_for_retrieval(q_ls, tokenizer, qa_embedder, device="cuda:0"): q_toks = tokenizer(q_ls, max_length=128, padding="max_length", truncation=True) q_ids, q_mask = ( torch.LongTensor(q_toks["input_ids"]).to(device), torch.LongTensor(q_toks["attention_mask"]).to(device), ) with torch.no_grad(): q_reps = qa_embedder.embed_questions(q_ids, q_mask).cpu().type(torch.float) return q_reps.numpy() def make_qa_dense_index( qa_embedder, tokenizer, passages_dset, batch_size=512, max_length=128, index_name="kilt_passages_reps.dat", dtype="float32", device="cuda:0", ): st_time = time() fp = np.memmap(index_name, dtype=dtype, mode="w+", shape=(passages_dset.num_rows, 128)) n_batches = math.ceil(passages_dset.num_rows / batch_size) for i in range(n_batches): passages = list(passages_dset[i * batch_size : (i + 1) * batch_size]["passage_text"]) reps = embed_passages_for_retrieval(passages, tokenizer, qa_embedder, max_length, device) fp[i * batch_size : (i + 1) * batch_size] = reps if i % 50 == 0: print(i, time() - st_time) def evaluate_retriever(qa_list, retriever_func, scoring_func, n_ret=10, verbose=False): total_retriever_time = 0.0 total_retriever_score = 0.0 st_time = time() for i, (question, answer) in enumerate(qa_list): r_time = time() retrieved_passages = retriever_func(question, n_ret) total_retriever_time += time() - r_time total_retriever_score += scoring_func(retrieved_passages, answer) if verbose and ((i + 1) % 500 == 0 or i <= 1): print( "{:03d}: S-{:.4f} T-{:.4f} | {:.2f}".format( i + 1, total_retriever_score / (i + 1), total_retriever_time / (i + 1), time() - st_time ) ) return {"idf_recall": total_retriever_score / (i + 1), "retrieval_time": total_retriever_time / (i + 1)} # build a support document for the question out of Wikipedia snippets def query_qa_dense_index( question, qa_embedder, tokenizer, wiki_passages, wiki_index, n_results=10, min_length=20, device="cuda:0" ): q_rep = embed_questions_for_retrieval([question], tokenizer, qa_embedder, device=device) D, I = wiki_index.search(q_rep, 2 * n_results) res_passages = [wiki_passages[int(i)] 
for i in I[0]] support_doc = "<P> " + " <P> ".join([p["passage_text"] for p in res_passages]) res_list = [{k: p[k] for k in wiki_passages.column_names} for p in res_passages] res_list = [res for res in res_list if len(res["passage_text"].split()) > min_length][:n_results] for r, sc in zip(res_list, D[0]): r["score"] = float(sc) return support_doc, res_list def batch_query_qa_dense_index(questions, qa_embedder, tokenizer, wiki_passages, wiki_index, n_results=10): q_rep = embed_questions_for_retrieval(questions, tokenizer, qa_embedder) D, I = wiki_index.search(q_rep, n_results) res_passages_lst = [[wiki_passages[int(i)] for i in i_lst] for i_lst in I] support_doc_lst = [ "<P> " + " <P> ".join([p["passage_text"] for p in res_passages]) for res_passages in res_passages_lst ] all_res_lists = [] for res_passages, dl in zip(res_passages_lst, D): res_list = [{k: p[k] for k in wiki_passages.column_names} for p in res_passages] for r, sc in zip(res_list, dl): r["score"] = float(sc) all_res_lists += [res_list[:]] return support_doc_lst, all_res_lists # find nearest neighbors of an answer or declarative text in Wikipedia snippets def query_qa_dense_index_nn(passage, qa_embedder, tokenizer, wiki_passages, wiki_index, n_results=10, min_length=20): a_rep = embed_passages_for_retrieval([passage], tokenizer, qa_embedder) D, I = wiki_index.search(a_rep, 2 * n_results) res_passages = [wiki_passages[int(i)] for i in I[0]] support_doc = "<P> " + " <P> ".join([p["passage_text"] for p in res_passages]) res_list = [{k: p[k] for k in wiki_passages.column_names} for p in res_passages] res_list = [res for res in res_list if len(res["passage_text"].split()) > min_length][:n_results] for r, sc, i in zip(res_list, D[0], I[0]): r["passage_id"] = int(i) r["score"] = float(sc) return support_doc, res_list def batch_query_qa_dense_index_nn(passages, qa_embedder, tokenizer, wiki_passages, wiki_index, n_results=10): a_reps = embed_passages_for_retrieval(passages, tokenizer, qa_embedder) D, I = wiki_index.search(a_reps, n_results) res_passages_lst = [[wiki_passages[int(i)] for i in i_lst] for i_lst in I] support_doc_lst = [ "<P> " + " <P> ".join([p["passage_text"] for p in res_passages]) for res_passages in res_passages_lst ] all_res_lists = [] for res_passages, dl, il in zip(res_passages_lst, D, I): res_list = [{k: p[k] for k in wiki_passages.column_names} for p in res_passages] for r, sc, i in zip(res_list, dl, il): r["passage_id"] = int(i) r["score"] = float(sc) all_res_lists += [res_list[:]] return support_doc_lst, all_res_lists
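# ---------------------------------------------------------------------------
# Illustrative end-to-end usage (not part of the original module): a sketch of
# how the functions above are typically combined at inference time. The
# Elasticsearch connection details, checkpoint path, and index name are
# placeholders; the function signatures match the definitions in this file.
#
#   from elasticsearch import Elasticsearch
#
#   es_client = Elasticsearch([{"host": "localhost", "port": 9200}])
#   qa_s2s_tokenizer, qa_s2s_model = make_qa_s2s_model(
#       model_name="facebook/bart-large", from_file="seq2seq_models/eli5_bart_model_0.pth"
#   )
#
#   question = "Why does water expand when it freezes?"
#   support_doc, _ = query_es_index(question, es_client, index_name="english_wiki_kilt_snippets_100w")
#   question_doc = "question: {} context: {}".format(question, support_doc)
#   answers = qa_s2s_generate(question_doc, qa_s2s_model, qa_s2s_tokenizer, num_answers=1, num_beams=8)
#   print(answers[0])
# ---------------------------------------------------------------------------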
transformers/examples/research_projects/longform-qa/eli5_utils.py/0
{ "file_path": "transformers/examples/research_projects/longform-qa/eli5_utils.py", "repo_id": "transformers", "token_count": 13301 }
# coding=utf-8 # Copyright 2020 The HuggingFace Team All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-tuning the library models for masked language modeling (BERT, ALBERT, RoBERTa...) with whole word masking on a text file or a dataset. Here is the full list of checkpoints on the hub that can be fine-tuned by this script: https://huggingface.co/models?filter=fill-mask """ # You can also adapt this script on your own masked language modeling task. Pointers for this are left as comments. import json import logging import math import os import sys from dataclasses import dataclass, field from typing import Optional from datasets import Dataset, load_dataset import transformers from transformers import ( CONFIG_MAPPING, MODEL_FOR_MASKED_LM_MAPPING, AutoConfig, AutoModelForMaskedLM, AutoTokenizer, DataCollatorForWholeWordMask, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint, is_main_process logger = logging.getLogger(__name__) MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys()) MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch. """ model_name_or_path: Optional[str] = field( default=None, metadata={ "help": ( "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch." ) }, ) model_type: Optional[str] = field( default=None, metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)}, ) config_overrides: Optional[str] = field( default=None, metadata={ "help": ( "Override some existing default config settings when a model is trained from scratch. Example: " "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index" ) }, ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, ) use_fast_tokenizer: bool = field( default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) use_auth_token: bool = field( default=False, metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." 
) }, ) def __post_init__(self): if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None): raise ValueError( "--config_overrides can't be used in combination with --config_name or --model_name_or_path" ) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. """ dataset_name: Optional[str] = field( default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."}) validation_file: Optional[str] = field( default=None, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, ) train_ref_file: Optional[str] = field( default=None, metadata={"help": "An optional input train ref data file for whole word masking in Chinese."}, ) validation_ref_file: Optional[str] = field( default=None, metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."}, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) validation_split_percentage: Optional[int] = field( default=5, metadata={ "help": "The percentage of the train set used as validation set in case there's no validation split" }, ) max_seq_length: Optional[int] = field( default=None, metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated. Default to the max input length of the model." ) }, ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) mlm_probability: float = field( default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} ) pad_to_max_length: bool = field( default=False, metadata={ "help": ( "Whether to pad all samples to `max_seq_length`. " "If False, will pad the samples dynamically when batching to the maximum length in the batch." ) }, ) def __post_init__(self): if self.train_file is not None: extension = self.train_file.split(".")[-1] assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file." if self.validation_file is not None: extension = self.validation_file.split(".")[-1] assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file." def add_chinese_references(dataset, ref_file): with open(ref_file, "r", encoding="utf-8") as f: refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())] assert len(dataset) == len(refs) dataset_dict = {c: dataset[c] for c in dataset.column_names} dataset_dict["chinese_ref"] = refs return Dataset.from_dict(dataset_dict) def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. 
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Detecting last checkpoint. last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN) # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("Training/evaluation parameters %s", training_args) # Set seed before initializing model. set_seed(training_args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name) if "validation" not in datasets.keys(): datasets["validation"] = load_dataset( data_args.dataset_name, data_args.dataset_config_name, split=f"train[:{data_args.validation_split_percentage}%]", ) datasets["train"] = load_dataset( data_args.dataset_name, data_args.dataset_config_name, split=f"train[{data_args.validation_split_percentage}%:]", ) else: data_files = {} if data_args.train_file is not None: data_files["train"] = data_args.train_file extension = data_args.train_file.split(".")[-1] if data_args.validation_file is not None: data_files["validation"] = data_args.validation_file extension = data_args.validation_file.split(".")[-1] if extension == "txt": extension = "text" datasets = load_dataset(extension, data_files=data_files) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets. 
# Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. config_kwargs = { "cache_dir": model_args.cache_dir, "revision": model_args.model_revision, "use_auth_token": True if model_args.use_auth_token else None, } if model_args.config_name: config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs) elif model_args.model_name_or_path: config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs) else: config = CONFIG_MAPPING[model_args.model_type]() logger.warning("You are instantiating a new config instance from scratch.") if model_args.config_overrides is not None: logger.info(f"Overriding config: {model_args.config_overrides}") config.update_from_string(model_args.config_overrides) logger.info(f"New config: {config}") tokenizer_kwargs = { "cache_dir": model_args.cache_dir, "use_fast": model_args.use_fast_tokenizer, "revision": model_args.model_revision, "use_auth_token": True if model_args.use_auth_token else None, } if model_args.tokenizer_name: tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs) elif model_args.model_name_or_path: tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs) else: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script. " "You can do it from another script, save it, and load it from here, using --tokenizer_name." ) if model_args.model_name_or_path: model = AutoModelForMaskedLM.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=True if model_args.use_auth_token else None, ) else: logger.info("Training new model from scratch") model = AutoModelForMaskedLM.from_config(config) model.resize_token_embeddings(len(tokenizer)) # Preprocessing the datasets. # First we tokenize all the texts. if training_args.do_train: column_names = datasets["train"].column_names else: column_names = datasets["validation"].column_names text_column_name = "text" if "text" in column_names else column_names[0] padding = "max_length" if data_args.pad_to_max_length else False def tokenize_function(examples): # Remove empty lines examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()] return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length) tokenized_datasets = datasets.map( tokenize_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=[text_column_name], load_from_cache_file=not data_args.overwrite_cache, ) # Add the chinese references if provided if data_args.train_ref_file is not None: tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file) if data_args.validation_ref_file is not None: tokenized_datasets["validation"] = add_chinese_references( tokenized_datasets["validation"], data_args.validation_ref_file ) # If we have ref files, need to avoid it removed by trainer has_ref = data_args.train_ref_file or data_args.validation_ref_file if has_ref: training_args.remove_unused_columns = False # Data collator # This one will take care of randomly masking the tokens. 
data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability) # Initialize our Trainer trainer = Trainer( model=model, args=training_args, train_dataset=tokenized_datasets["train"] if training_args.do_train else None, eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None, tokenizer=tokenizer, data_collator=data_collator, ) # Training if training_args.do_train: if last_checkpoint is not None: checkpoint = last_checkpoint elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path): checkpoint = model_args.model_name_or_path else: checkpoint = None train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() # Saves the tokenizer too for easy upload output_train_file = os.path.join(training_args.output_dir, "train_results.txt") if trainer.is_world_process_zero(): with open(output_train_file, "w") as writer: logger.info("***** Train results *****") for key, value in sorted(train_result.metrics.items()): logger.info(f" {key} = {value}") writer.write(f"{key} = {value}\n") # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json")) # Evaluation results = {} if training_args.do_eval: logger.info("*** Evaluate ***") eval_output = trainer.evaluate() perplexity = math.exp(eval_output["eval_loss"]) results["perplexity"] = perplexity output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt") if trainer.is_world_process_zero(): with open(output_eval_file, "w") as writer: logger.info("***** Eval results *****") for key, value in sorted(results.items()): logger.info(f" {key} = {value}") writer.write(f"{key} = {value}\n") return results def _mp_fn(index): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
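

# ---------------------------------------------------------------------------
# Minimal usage sketch of the whole-word-masking collator, assuming a BERT-style
# WordPiece tokenizer; the checkpoint name and example sentence are placeholders
# and are not used by the training flow above.
def _whole_word_mask_collator_demo():
    from transformers import AutoTokenizer, DataCollatorForWholeWordMask

    tok = AutoTokenizer.from_pretrained("bert-base-uncased")  # assumed BERT-style tokenizer
    collator = DataCollatorForWholeWordMask(tokenizer=tok, mlm_probability=0.15)
    features = [{"input_ids": tok("huggingface makes transformers")["input_ids"]}]
    batch = collator(features)
    # All sub-word pieces of a randomly chosen word are masked together in
    # batch["input_ids"]; batch["labels"] keeps the original ids at the masked
    # positions and -100 everywhere else, so the loss only covers masked tokens.
    return batch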
transformers/examples/research_projects/mlm_wwm/run_mlm_wwm.py/0
{ "file_path": "transformers/examples/research_projects/mlm_wwm/run_mlm_wwm.py", "repo_id": "transformers", "token_count": 7241 }
# Sample script to finetune RAG using Ray for distributed retrieval.

# Add parent directory to python path to access lightning_base.py
export PYTHONPATH="../":"${PYTHONPATH}"

# Creates the custom knowledge base
python use_own_knowledge_dataset.py \
    --csv_path /DIR/SQUAD-KB/squad-kb.csv \
    --output_dir /DIR/SQUAD-KB

# Start a single-node Ray cluster.
ray start --head

# A sample finetuning run; you need to specify data_dir, output_dir and model_name_or_path.
# Run ./examples/rag/finetune_rag_ray.sh --help to see all the possible options.

python finetune_rag.py \
    --data_dir /DIR/squad-training-data \
    --output_dir /DIR/model_checkpoints \
    --model_name_or_path facebook/rag-token-base \
    --model_type rag_token \
    --fp16 \
    --gpus 2 \
    --profile \
    --do_train \
    --end2end \
    --do_predict \
    --n_val -1 \
    --train_batch_size 4 \
    --eval_batch_size 1 \
    --max_source_length 128 \
    --max_target_length 25 \
    --val_max_target_length 25 \
    --test_max_target_length 25 \
    --label_smoothing 0.1 \
    --dropout 0.1 \
    --attention_dropout 0.1 \
    --weight_decay 0.001 \
    --adam_epsilon 1e-08 \
    --max_grad_norm 0.1 \
    --lr_scheduler polynomial \
    --learning_rate 3e-05 \
    --num_train_epochs 10 \
    --warmup_steps 500 \
    --gradient_accumulation_steps 8 \
    --distributed_retriever ray \
    --num_retrieval_workers 4 \
    --passages_path /DIR/SQUAD-KB/my_knowledge_dataset \
    --index_path /DIR/SQUAD-KB/my_knowledge_dataset_hnsw_index.faiss \
    --index_name custom \
    --context_encoder_name facebook/dpr-ctx_encoder-multiset-base \
    --csv_path /DIR/SQUAD-KB/squad-kb.csv \
    --index_gpus 1 \
    --gpu_order [5,6,7,8,9,0,1,2,3,4] \
    --shard_dir ./test_dir/kb-shards \
    --indexing_freq 500

# Stop the Ray cluster.
ray stop

# This script was used to test the SQuAD data.
# Change the directory parameters according to your preference.
# Please use the same device order when running CUDA_VISIBLE_DEVICES=5,6,7,8,9,0,1,2,3,4 sh finetune_rag_ray_end2end.sh
transformers/examples/research_projects/rag-end2end-retriever/finetune_rag_ray_end2end.sh/0
{ "file_path": "transformers/examples/research_projects/rag-end2end-retriever/finetune_rag_ray_end2end.sh", "repo_id": "transformers", "token_count": 876 }
import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, T5Tokenizer def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"): extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {} tokenizer.padding_side = padding_side return tokenizer( [line], max_length=max_length, padding="max_length" if pad_to_max_length else None, truncation=True, return_tensors=return_tensors, add_special_tokens=True, **extra_kw, ) def trim_batch( input_ids, pad_token_id, attention_mask=None, ): """Remove columns that are populated exclusively by pad_token_id""" keep_column_mask = input_ids.ne(pad_token_id).any(dim=0) if attention_mask is None: return input_ids[:, keep_column_mask] else: return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]) class Seq2SeqDataset(Dataset): def __init__( self, tokenizer, data_dir, max_source_length, max_target_length, type_path="train", n_obs=None, src_lang=None, tgt_lang=None, prefix="", ): super().__init__() self.src_file = Path(data_dir).joinpath(type_path + ".source") self.tgt_file = Path(data_dir).joinpath(type_path + ".target") self.src_lens = self.get_char_lens(self.src_file) self.max_source_length = max_source_length self.max_target_length = max_target_length assert min(self.src_lens) > 0, f"found empty line in {self.src_file}" self.tokenizer = tokenizer self.prefix = prefix if n_obs is not None: self.src_lens = self.src_lens[:n_obs] self.src_lang = src_lang self.tgt_lang = tgt_lang def __len__(self): return len(self.src_lens) def __getitem__(self, index) -> Dict[str, torch.Tensor]: index = index + 1 # linecache starts at 1 source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n") tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n") assert source_line, f"empty source line for index {index}" assert tgt_line, f"empty tgt line for index {index}" # Need to add eos token manually for T5 if isinstance(self.tokenizer, T5Tokenizer): source_line += self.tokenizer.eos_token tgt_line += self.tokenizer.eos_token # Pad source and target to the right source_tokenizer = ( self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer ) target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right") target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right") source_ids = source_inputs["input_ids"].squeeze() target_ids = target_inputs["input_ids"].squeeze() src_mask = source_inputs["attention_mask"].squeeze() return { "input_ids": source_ids, "attention_mask": src_mask, "decoder_input_ids": target_ids, } @staticmethod def get_char_lens(data_file): return [len(x) for x in Path(data_file).open().readlines()] def collate_fn(self, batch) -> Dict[str, torch.Tensor]: input_ids = torch.stack([x["input_ids"] for x in batch]) masks = torch.stack([x["attention_mask"] for x in batch]) target_ids = torch.stack([x["decoder_input_ids"] for x in batch]) tgt_pad_token_id = ( self.tokenizer.generator.pad_token_id if isinstance(self.tokenizer, 
RagTokenizer) else self.tokenizer.pad_token_id ) src_pad_token_id = ( self.tokenizer.question_encoder.pad_token_id if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer.pad_token_id ) y = trim_batch(target_ids, tgt_pad_token_id) source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks) batch = { "input_ids": source_ids, "attention_mask": source_mask, "decoder_input_ids": y, } return batch logger = getLogger(__name__) def flatten_list(summary_ids: List[List]): return list(itertools.chain.from_iterable(summary_ids)) def save_git_info(folder_path: str) -> None: """Save git information to output_dir/git_log.json""" repo_infos = get_git_info() save_json(repo_infos, os.path.join(folder_path, "git_log.json")) def save_json(content, path, indent=4, **json_dump_kwargs): with open(path, "w") as f: json.dump(content, f, indent=indent, **json_dump_kwargs) def load_json(path): with open(path) as f: return json.load(f) def get_git_info(): repo = git.Repo(search_parent_directories=True) repo_infos = { "repo_id": str(repo), "repo_sha": str(repo.head.object.hexsha), "repo_branch": str(repo.active_branch), "hostname": str(socket.gethostname()), } return repo_infos def lmap(f: Callable, x: Iterable) -> List: """list(map(f, x))""" return list(map(f, x)) def pickle_save(obj, path): """pickle.dump(obj, path)""" with open(path, "wb") as f: return pickle.dump(obj, f) def normalize_answer(s): """Lower text and remove punctuation, articles and extra whitespace.""" def remove_articles(text): return re.sub(r"\b(a|an|the)\b", " ", text) def white_space_fix(text): return " ".join(text.split()) def remove_punc(text): exclude = set(string.punctuation) return "".join(ch for ch in text if ch not in exclude) def lower(text): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(s)))) def f1_score(prediction, ground_truth): prediction_tokens = normalize_answer(prediction).split() ground_truth_tokens = normalize_answer(ground_truth).split() common = Counter(prediction_tokens) & Counter(ground_truth_tokens) num_same = sum(common.values()) if num_same == 0: return 0 precision = 1.0 * num_same / len(prediction_tokens) recall = 1.0 * num_same / len(ground_truth_tokens) f1 = (2 * precision * recall) / (precision + recall) return f1 def exact_match_score(prediction, ground_truth): return normalize_answer(prediction) == normalize_answer(ground_truth) def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict: assert len(output_lns) == len(reference_lns) em = 0 for hypo, pred in zip(output_lns, reference_lns): em += exact_match_score(hypo, pred) if len(output_lns) > 0: em /= len(output_lns) return {"em": em} def is_rag_model(model_prefix): return model_prefix.startswith("rag") def set_extra_model_params(extra_params, hparams, config): equivalent_param = {p: p for p in extra_params} # T5 models don't have `dropout` param, they have `dropout_rate` instead equivalent_param["dropout"] = "dropout_rate" for p in extra_params: if getattr(hparams, p, None): if not hasattr(config, p) and not hasattr(config, equivalent_param[p]): logger.info("config doesn't have a `{}` attribute".format(p)) delattr(hparams, p) continue set_p = p if hasattr(config, p) else equivalent_param[p] setattr(config, set_p, getattr(hparams, p)) delattr(hparams, p) return hparams, config
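

# ---------------------------------------------------------------------------
# Minimal sketch of how `trim_batch` drops columns that contain nothing but
# padding; the pad id and token ids below are arbitrary demo values.
def _trim_batch_demo():
    pad_token_id = 1
    input_ids = torch.tensor([[5, 6, 1, 1], [7, 1, 1, 1]])
    attention_mask = input_ids.ne(pad_token_id).long()
    trimmed_ids, trimmed_mask = trim_batch(input_ids, pad_token_id, attention_mask=attention_mask)
    # Columns 2 and 3 are padding in every row, so both outputs have shape (2, 2);
    # column 1 is kept because it still holds a real token in the first row.
    return trimmed_ids, trimmed_mask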
transformers/examples/research_projects/rag/utils_rag.py/0
{ "file_path": "transformers/examples/research_projects/rag/utils_rag.py", "repo_id": "transformers", "token_count": 3495 }
#!/usr/bin/env python import os from pathlib import Path from typing import Dict, List import fire import torch from transformers import AutoModelForSeq2SeqLM, AutoTokenizer from transformers.utils.logging import get_logger logger = get_logger(__name__) def remove_prefix(text: str, prefix: str): if text.startswith(prefix): return text[len(prefix) :] return text # or whatever def sanitize(sd): return {remove_prefix(k, "model."): v for k, v in sd.items()} def average_state_dicts(state_dicts: List[Dict[str, torch.Tensor]]): new_sd = {} for k in state_dicts[0].keys(): tensors = [sd[k] for sd in state_dicts] new_t = sum(tensors) / len(tensors) assert isinstance(new_t, torch.Tensor) new_sd[k] = new_t return new_sd def convert_pl_to_hf(pl_ckpt_path: str, hf_src_model_dir: str, save_path: str) -> None: """Cleanup a pytorch-lightning .ckpt file or experiment dir and save a huggingface model with that state dict. Silently allows extra pl keys (like teacher.) Puts all ckpt models into CPU RAM at once! Args: pl_ckpt_path (:obj:`str`): Path to a .ckpt file saved by pytorch_lightning or dir containing ckpt files. If a directory is passed, all .ckpt files inside it will be averaged! hf_src_model_dir (:obj:`str`): Path to a directory containing a correctly shaped checkpoint save_path (:obj:`str`): Directory to save the new model """ hf_model = AutoModelForSeq2SeqLM.from_pretrained(hf_src_model_dir) if os.path.isfile(pl_ckpt_path): ckpt_files = [pl_ckpt_path] else: assert os.path.isdir(pl_ckpt_path) ckpt_files = list(Path(pl_ckpt_path).glob("*.ckpt")) assert ckpt_files, f"could not find any ckpt files inside the {pl_ckpt_path} directory" if len(ckpt_files) > 1: logger.info(f"averaging the weights of {ckpt_files}") state_dicts = [sanitize(torch.load(x, map_location="cpu")["state_dict"]) for x in ckpt_files] state_dict = average_state_dicts(state_dicts) missing, unexpected = hf_model.load_state_dict(state_dict, strict=False) assert not missing, f"missing keys: {missing}" hf_model.save_pretrained(save_path) try: tok = AutoTokenizer.from_pretrained(hf_src_model_dir) tok.save_pretrained(save_path) except Exception: pass # dont copy tokenizer if cant if __name__ == "__main__": fire.Fire(convert_pl_to_hf)
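

# ---------------------------------------------------------------------------
# Usage sketch (the paths below are placeholders): `fire.Fire` exposes the
# arguments of `convert_pl_to_hf` as positional CLI arguments, so a typical
# invocation looks like
#
#   python convert_pl_checkpoint_to_hf.py \
#       /path/to/lightning_ckpts_or_file.ckpt \
#       /path/to/hf_src_model_dir \
#       /path/to/save_dir
#
# When the first argument is a directory, every *.ckpt file inside it is averaged
# before the resulting state dict is loaded into the Hugging Face model.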
transformers/examples/research_projects/seq2seq-distillation/convert_pl_checkpoint_to_hf.py/0
{ "file_path": "transformers/examples/research_projects/seq2seq-distillation/convert_pl_checkpoint_to_hf.py", "repo_id": "transformers", "token_count": 1017 }
#!/usr/bin/env bash export PYTHONPATH="../":"${PYTHONPATH}" export BS=32 export GAS=1 python finetune.py \ --learning_rate=3e-5 \ --fp16 \ --gpus 1 \ --do_train \ --do_predict \ --val_check_interval 0.25 \ --n_val 500 \ --num_train_epochs 2 \ --freeze_encoder --freeze_embeds --data_dir cnn_dm \ --max_target_length 142 --val_max_target_length=142 \ --train_batch_size=$BS --eval_batch_size=$BS --gradient_accumulation_steps=$GAS \ --model_name_or_path sshleifer/student_cnn_12_6 \ --tokenizer_name facebook/bart-large \ --warmup_steps 500 \ --output_dir distilbart-cnn-12-6 \ "$@"
transformers/examples/research_projects/seq2seq-distillation/train_distilbart_cnn.sh/0
{ "file_path": "transformers/examples/research_projects/seq2seq-distillation/train_distilbart_cnn.sh", "repo_id": "transformers", "token_count": 292 }
# Parts of the code are adapted from the snippets provided in the TorchAudio Wav2Vec forced alignment tutorial. # The full tutorial can be found here: https://pytorch.org/audio/stable/tutorials/forced_alignment_tutorial.html import argparse import os from dataclasses import dataclass import torch import torchaudio from tqdm import tqdm from transformers import AutoConfig, AutoModelForCTC, AutoProcessor class Wav2Vec2Aligner: def __init__(self, model_name, input_wavs_sr, cuda): self.cuda = cuda self.config = AutoConfig.from_pretrained(model_name) self.model = AutoModelForCTC.from_pretrained(model_name) self.model.eval() if self.cuda: self.model.to(device="cuda") self.processor = AutoProcessor.from_pretrained(model_name) self.resampler = torchaudio.transforms.Resample(input_wavs_sr, 16_000) blank_id = 0 vocab = list(self.processor.tokenizer.get_vocab().keys()) for i in range(len(vocab)): if vocab[i] == "[PAD]" or vocab[i] == "<pad>": blank_id = i print("Blank Token id [PAD]/<pad>", blank_id) self.blank_id = blank_id def speech_file_to_array_fn(self, wav_path): speech_array, sampling_rate = torchaudio.load(wav_path) speech = self.resampler(speech_array).squeeze().numpy() return speech def align_single_sample(self, item): blank_id = self.blank_id transcript = "|".join(item["sent"].split(" ")) if not os.path.isfile(item["wav_path"]): print(item["wav_path"], "not found in wavs directory") speech_array = self.speech_file_to_array_fn(item["wav_path"]) inputs = self.processor(speech_array, sampling_rate=16_000, return_tensors="pt", padding=True) if self.cuda: inputs = inputs.to(device="cuda") with torch.no_grad(): logits = self.model(inputs.input_values).logits # get the emission probability at frame level emissions = torch.log_softmax(logits, dim=-1) emission = emissions[0].cpu().detach() # get labels from vocab labels = ([""] + list(self.processor.tokenizer.get_vocab().keys()))[ :-1 ] # logits don't align with the tokenizer's vocab dictionary = {c: i for i, c in enumerate(labels)} tokens = [] for c in transcript: if c in dictionary: tokens.append(dictionary[c]) def get_trellis(emission, tokens, blank_id=0): """ Build a trellis matrix of shape (num_frames + 1, num_tokens + 1) that represents the probabilities of each source token being at a certain time step """ num_frames = emission.size(0) num_tokens = len(tokens) # Trellis has extra diemsions for both time axis and tokens. # The extra dim for tokens represents <SoS> (start-of-sentence) # The extra dim for time axis is for simplification of the code. trellis = torch.full((num_frames + 1, num_tokens + 1), -float("inf")) trellis[:, 0] = 0 for t in range(num_frames): trellis[t + 1, 1:] = torch.maximum( # Score for staying at the same token trellis[t, 1:] + emission[t, blank_id], # Score for changing to the next token trellis[t, :-1] + emission[t, tokens], ) return trellis trellis = get_trellis(emission, tokens, blank_id) @dataclass class Point: token_index: int time_index: int score: float def backtrack(trellis, emission, tokens, blank_id=0): """ Walk backwards from the last (sentence_token, time_step) pair to build the optimal sequence alignment path """ # Note: # j and t are indices for trellis, which has extra dimensions # for time and tokens at the beginning. # When referring to time frame index `T` in trellis, # the corresponding index in emission is `T-1`. # Similarly, when referring to token index `J` in trellis, # the corresponding index in transcript is `J-1`. 
j = trellis.size(1) - 1 t_start = torch.argmax(trellis[:, j]).item() path = [] for t in range(t_start, 0, -1): # 1. Figure out if the current position was stay or change # Note (again): # `emission[J-1]` is the emission at time frame `J` of trellis dimension. # Score for token staying the same from time frame J-1 to T. stayed = trellis[t - 1, j] + emission[t - 1, blank_id] # Score for token changing from C-1 at T-1 to J at T. changed = trellis[t - 1, j - 1] + emission[t - 1, tokens[j - 1]] # 2. Store the path with frame-wise probability. prob = emission[t - 1, tokens[j - 1] if changed > stayed else 0].exp().item() # Return token index and time index in non-trellis coordinate. path.append(Point(j - 1, t - 1, prob)) # 3. Update the token if changed > stayed: j -= 1 if j == 0: break else: raise ValueError("Failed to align") return path[::-1] path = backtrack(trellis, emission, tokens, blank_id) @dataclass class Segment: label: str start: int end: int score: float def __repr__(self): return f"{self.label}\t{self.score:4.2f}\t{self.start*20:5d}\t{self.end*20:5d}" @property def length(self): return self.end - self.start def merge_repeats(path): """ Merge repeated tokens into a single segment. Note: this shouldn't affect repeated characters from the original sentences (e.g. `ll` in `hello`) """ i1, i2 = 0, 0 segments = [] while i1 < len(path): while i2 < len(path) and path[i1].token_index == path[i2].token_index: i2 += 1 score = sum(path[k].score for k in range(i1, i2)) / (i2 - i1) segments.append( Segment( transcript[path[i1].token_index], path[i1].time_index, path[i2 - 1].time_index + 1, score, ) ) i1 = i2 return segments segments = merge_repeats(path) with open(item["out_path"], "w") as out_align: for seg in segments: out_align.write(str(seg) + "\n") def align_data(self, wav_dir, text_file, output_dir): if not os.path.exists(output_dir): os.makedirs(output_dir) # load text file lines = open(text_file, encoding="utf8").readlines() items = [] for line in lines: if len(line.strip().split("\t")) != 2: print("Script must be in format: 00001 this is my sentence") exit() wav_name, sentence = line.strip().split("\t") wav_path = os.path.join(wav_dir, wav_name + ".wav") out_path = os.path.join(output_dir, wav_name + ".txt") items.append({"sent": sentence, "wav_path": wav_path, "out_path": out_path}) print("Number of samples found in script file", len(items)) for item in tqdm(items): self.align_single_sample(item) def main(): parser = argparse.ArgumentParser() parser.add_argument( "--model_name", type=str, default="arijitx/wav2vec2-xls-r-300m-bengali", help="wav2vec model name" ) parser.add_argument("--wav_dir", type=str, default="./wavs", help="directory containing wavs") parser.add_argument("--text_file", type=str, default="script.txt", help="file containing text") parser.add_argument("--input_wavs_sr", type=int, default=16000, help="sampling rate of input audios") parser.add_argument( "--output_dir", type=str, default="./out_alignment", help="output directory containing the alignment files" ) parser.add_argument("--cuda", action="store_true") args = parser.parse_args() aligner = Wav2Vec2Aligner(args.model_name, args.input_wavs_sr, args.cuda) aligner.align_data(args.wav_dir, args.text_file, args.output_dir) if __name__ == "__main__": main()
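

# ---------------------------------------------------------------------------
# Usage sketch (file and directory names are placeholders). The text file is
# tab-separated with one "<wav_name><TAB><sentence>" pair per line, and each
# <wav_name>.wav must exist inside --wav_dir, e.g.
#
#   script.txt:
#       00001<TAB>this is my sentence
#       00002<TAB>another sentence to align
#
#   python alignment.py \
#       --model_name arijitx/wav2vec2-xls-r-300m-bengali \
#       --wav_dir ./wavs \
#       --text_file script.txt \
#       --output_dir ./out_alignment \
#       --cuda
#
# Each output file then holds one line per aligned segment in the form
# "label<TAB>score<TAB>start<TAB>end", where start/end are frame indices
# multiplied by 20 (i.e. milliseconds at the model's 20 ms frame rate).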
transformers/examples/research_projects/wav2vec2/alignment.py/0
{ "file_path": "transformers/examples/research_projects/wav2vec2/alignment.py", "repo_id": "transformers", "token_count": 4170 }
<!--- Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # XTREME-S benchmark examples *Maintainers: [Anton Lozhkov](https://github.com/anton-l) and [Patrick von Platen](https://github.com/patrickvonplaten)* The Cross-lingual TRansfer Evaluation of Multilingual Encoders for Speech (XTREME-S) benchmark is a benchmark designed to evaluate speech representations across languages, tasks, domains and data regimes. It covers XX typologically diverse languages and seven downstream tasks grouped in four families: speech recognition, translation, classification and retrieval. XTREME-S covers speech recognition with Fleurs, Multilingual LibriSpeech (MLS) and VoxPopuli, speech translation with CoVoST-2, speech classification with LangID (Fleurs) and intent classification (MInds-14) and finally speech(-text) retrieval with Fleurs. Each of the tasks covers a subset of the 102 languages included in XTREME-S (shown here with their ISO 3166-1 codes): afr, amh, ara, asm, ast, azj, bel, ben, bos, cat, ceb, ces, cmn, cym, dan, deu, ell, eng, spa, est, fas, ful, fin, tgl, fra, gle, glg, guj, hau, heb, hin, hrv, hun, hye, ind, ibo, isl, ita, jpn, jav, kat, kam, kea, kaz, khm, kan, kor, ckb, kir, ltz, lug, lin, lao, lit, luo, lav, mri, mkd, mal, mon, mar, msa, mlt, mya, nob, npi, nld, nso, nya, oci, orm, ory, pan, pol, pus, por, ron, rus, bul, snd, slk, slv, sna, som, srp, swe, swh, tam, tel, tgk, tha, tur, ukr, umb, urd, uzb, vie, wol, xho, yor, yue and zul. Paper: [XTREME-S: Evaluating Cross-lingual Speech Representations](https://arxiv.org/abs/2203.10752) Dataset: [https://huggingface.co/datasets/google/xtreme_s](https://huggingface.co/datasets/google/xtreme_s) ## Fine-tuning for the XTREME-S tasks Based on the [`run_xtreme_s.py`](https://github.com/huggingface/transformers/blob/main/examples/research_projects/xtreme-s/run_xtreme_s.py) script. This script can fine-tune any of the pretrained speech models on the [hub](https://huggingface.co/models?pipeline_tag=automatic-speech-recognition) on the [XTREME-S dataset](https://huggingface.co/datasets/google/xtreme_s) tasks. XTREME-S is made up of 7 different tasks. Here is how to run the script on each of them: ```bash export TASK_NAME=mls.all python run_xtreme_s.py \ --model_name_or_path="facebook/wav2vec2-xls-r-300m" \ --task="${TASK_NAME}" \ --output_dir="xtreme_s_xlsr_${TASK_NAME}" \ --num_train_epochs=100 \ --per_device_train_batch_size=32 \ --learning_rate="3e-4" \ --target_column_name="transcription" \ --save_steps=500 \ --eval_steps=500 \ --gradient_checkpointing \ --fp16 \ --group_by_length \ --do_train \ --do_eval \ --do_predict \ --push_to_hub ``` where `TASK_NAME` can be one of: `mls, voxpopuli, covost2, fleurs-asr, fleurs-lang_id, minds14`. We get the following results on the test set of the benchmark's datasets. 
The corresponding training commands for each dataset are given in the sections below: | Task | Dataset | Result | Fine-tuned model & logs | Training time | GPUs | |-----------------------|-----------|-----------------------|--------------------------------------------------------------------|---------------|--------| | Speech Recognition | MLS | 30.33 WER | [here](https://huggingface.co/anton-l/xtreme_s_xlsr_300m_mls/) | 18:47:25 | 8xV100 | | Speech Recognition | VoxPopuli | - | - | - | - | | Speech Recognition | FLEURS | - | - | - | - | | Speech Translation | CoVoST-2 | - | - | - | - | | Speech Classification | Minds-14 | 90.15 F1 / 90.33 Acc. | [here](https://huggingface.co/anton-l/xtreme_s_xlsr_300m_minds14/) | 2:54:21 | 2xA100 | | Speech Classification | FLEURS | - | - | - | - | | Speech Retrieval | FLEURS | - | - | - | - | ### Speech Recognition with MLS The following command shows how to fine-tune the [XLS-R](https://huggingface.co/docs/transformers/main/model_doc/xls_r) model on [XTREME-S MLS](https://huggingface.co/datasets/google/xtreme_s#multilingual-librispeech-mls) using 8 GPUs in half-precision. ```bash python -m torch.distributed.launch \ --nproc_per_node=8 \ run_xtreme_s.py \ --task="mls" \ --language="all" \ --model_name_or_path="facebook/wav2vec2-xls-r-300m" \ --output_dir="xtreme_s_xlsr_300m_mls" \ --overwrite_output_dir \ --num_train_epochs=100 \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=1 \ --gradient_accumulation_steps=2 \ --learning_rate="3e-4" \ --warmup_steps=3000 \ --eval_strategy="steps" \ --max_duration_in_seconds=20 \ --save_steps=500 \ --eval_steps=500 \ --logging_steps=1 \ --layerdrop=0.0 \ --mask_time_prob=0.3 \ --mask_time_length=10 \ --mask_feature_prob=0.1 \ --mask_feature_length=64 \ --freeze_feature_encoder \ --gradient_checkpointing \ --fp16 \ --group_by_length \ --do_train \ --do_eval \ --do_predict \ --metric_for_best_model="wer" \ --greater_is_better=False \ --load_best_model_at_end \ --push_to_hub ``` On 8 V100 GPUs, this script should run in ~19 hours and yield a cross-entropy loss of **0.6215** and word error rate of **30.33** ### Speech Classification with Minds-14 The following command shows how to fine-tune the [XLS-R](https://huggingface.co/docs/transformers/main/model_doc/xls_r) model on [XTREME-S MLS](https://huggingface.co/datasets/google/xtreme_s#intent-classification---minds-14) using 2 GPUs in half-precision. ```bash python -m torch.distributed.launch \ --nproc_per_node=2 \ run_xtreme_s.py \ --task="minds14" \ --language="all" \ --model_name_or_path="facebook/wav2vec2-xls-r-300m" \ --output_dir="xtreme_s_xlsr_300m_minds14" \ --overwrite_output_dir \ --num_train_epochs=50 \ --per_device_train_batch_size=32 \ --per_device_eval_batch_size=8 \ --gradient_accumulation_steps=1 \ --learning_rate="3e-4" \ --warmup_steps=1500 \ --eval_strategy="steps" \ --max_duration_in_seconds=30 \ --save_steps=200 \ --eval_steps=200 \ --logging_steps=1 \ --layerdrop=0.0 \ --mask_time_prob=0.3 \ --mask_time_length=10 \ --mask_feature_prob=0.1 \ --mask_feature_length=64 \ --freeze_feature_encoder \ --gradient_checkpointing \ --fp16 \ --group_by_length \ --do_train \ --do_eval \ --do_predict \ --metric_for_best_model="f1" \ --greater_is_better=True \ --load_best_model_at_end \ --push_to_hub ``` On 2 A100 GPUs, this script should run in ~5 hours and yield a cross-entropy loss of **0.4119** and F1 score of **90.15**
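
### Inference with a fine-tuned checkpoint

The snippet below is only a minimal sketch of loading an ASR checkpoint produced by `run_xtreme_s.py` (and pushed to the hub with `--push_to_hub`) for inference; it assumes the MLS checkpoint from the results table above and a local 16 kHz audio file.

```python
from transformers import pipeline

# Replace with your own hub repo id or local output_dir; this one is taken from the results table above.
asr = pipeline("automatic-speech-recognition", model="anton-l/xtreme_s_xlsr_300m_mls")
print(asr("path/to/audio_16khz.wav"))
```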
transformers/examples/research_projects/xtreme-s/README.md/0
{ "file_path": "transformers/examples/research_projects/xtreme-s/README.md", "repo_id": "transformers", "token_count": 3399 }
#!/usr/bin/env python # coding=utf-8 # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-tuning the library models for summarization. """ # You can also adapt this script on your own sequence to sequence task. Pointers for this are left as comments. import json import logging import os import sys from dataclasses import dataclass, field from typing import Optional import datasets import evaluate import nltk # Here to have a nice missing dependency error message early on import numpy as np import tensorflow as tf from datasets import load_dataset from filelock import FileLock import transformers from transformers import ( AutoConfig, AutoTokenizer, DataCollatorForSeq2Seq, HfArgumentParser, KerasMetricCallback, PushToHubCallback, TFAutoModelForSeq2SeqLM, TFTrainingArguments, create_optimizer, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, is_offline_mode, send_example_telemetry from transformers.utils.versions import require_version # region Checking dependencies # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.49.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/summarization/requirements.txt") logger = logging.getLogger(__name__) try: nltk.data.find("tokenizers/punkt") except (LookupError, OSError): if is_offline_mode(): raise LookupError( "Offline mode: run this script without TRANSFORMERS_OFFLINE first to download nltk data files" ) with FileLock(".lock") as lock: nltk.download("punkt", quiet=True) # endregion # region Arguments @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where to store the pretrained models downloaded from huggingface.co"}, ) use_fast_tokenizer: bool = field( default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) token: str = field( default=None, metadata={ "help": ( "The token to use as HTTP bearer authorization for remote files. If not specified, will use the token " "generated when running `huggingface-cli login` (stored in `~/.huggingface`)." 
) }, ) trust_remote_code: bool = field( default=False, metadata={ "help": ( "Whether to trust the execution of code from datasets/models defined on the Hub." " This option should only be set to `True` for repositories you trust and in which you have read the" " code, as it will execute code present on the Hub on your local machine." ) }, ) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. """ dataset_name: Optional[str] = field( default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) text_column: Optional[str] = field( default=None, metadata={"help": "The name of the column in the datasets containing the full texts (for summarization)."}, ) summary_column: Optional[str] = field( default=None, metadata={"help": "The name of the column in the datasets containing the summaries (for summarization)."}, ) train_file: Optional[str] = field( default=None, metadata={"help": "The input training data file (a jsonlines or csv file)."} ) validation_file: Optional[str] = field( default=None, metadata={ "help": ( "An optional input evaluation data file to evaluate the metrics (rouge) on (a jsonlines or csv file)." ) }, ) test_file: Optional[str] = field( default=None, metadata={ "help": "An optional input test data file to evaluate the metrics (rouge) on (a jsonlines or csv file)." }, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) max_source_length: Optional[int] = field( default=1024, metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) max_target_length: Optional[int] = field( default=128, metadata={ "help": ( "The maximum total sequence length for target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) val_max_target_length: Optional[int] = field( default=None, metadata={ "help": ( "The maximum total sequence length for validation target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded. Will default to `max_target_length`. " "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used " "during ``evaluate`` and ``predict``." ) }, ) pad_to_max_length: bool = field( default=False, metadata={ "help": ( "Whether to pad all samples to model maximum sentence length. " "If False, will pad the samples dynamically when batching to the maximum length in the batch. More " "efficient on GPU but very bad for TPU." ) }, ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) }, ) max_eval_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." 
) }, ) max_predict_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of prediction examples to this " "value if set." ) }, ) num_beams: Optional[int] = field( default=1, metadata={ "help": ( "Number of beams to use for evaluation. This argument will be passed to ``model.generate``, " "which is used during ``evaluate`` and ``predict``." ) }, ) ignore_pad_token_for_loss: bool = field( default=True, metadata={ "help": "Whether to ignore the tokens corresponding to padded labels in the loss computation or not." }, ) source_prefix: Optional[str] = field( default=None, metadata={"help": "A prefix to add before every source text (useful for T5 models)."} ) def __post_init__(self): if self.dataset_name is None and self.train_file is None and self.validation_file is None: raise ValueError("Need either a dataset name or a training/validation file.") else: if self.train_file is not None: extension = self.train_file.split(".")[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if self.validation_file is not None: extension = self.validation_file.split(".")[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." if self.val_max_target_length is None: self.val_max_target_length = self.max_target_length # endregion # region Dataset name mappings summarization_name_mapping = { "amazon_reviews_multi": ("review_body", "review_title"), "big_patent": ("description", "abstract"), "cnn_dailymail": ("article", "highlights"), "orange_sum": ("text", "summary"), "pn_summary": ("article", "summary"), "psc": ("extract_text", "summary_text"), "samsum": ("dialogue", "summary"), "thaisum": ("body", "summary"), "xglue": ("news_body", "news_title"), "xsum": ("document", "summary"), "wiki_summary": ("article", "highlights"), "multi_news": ("document", "summary"), } # endregion def main(): # region Argument parsing # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. 
send_example_telemetry("run_summarization", model_args, data_args, framework="tensorflow") # endregion # region Logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) logger.setLevel(logging.INFO) datasets.utils.logging.set_verbosity(logging.INFO) transformers.utils.logging.set_verbosity(logging.INFO) # Log on each process the small summary: logger.info(f"Training/evaluation parameters {training_args}") # endregion # region T5 special-casing if data_args.source_prefix is None and model_args.model_name_or_path in [ "google-t5/t5-small", "google-t5/t5-base", "google-t5/t5-large", "google-t5/t5-3b", "google-t5/t5-11b", ]: logger.warning( "You're running a t5 model but didn't provide a source prefix, which is the expected, e.g. with " "`--source_prefix 'summarize: ' `" ) # endregion # region Detecting last checkpoint last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # endregion # Set seed before initializing model. set_seed(training_args.seed) # region Load datasets # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files this script will use the first column for the full texts and the second column for the # summaries (unless you specify column names for this with the `text_column` and `summary_column` arguments). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. raw_datasets = load_dataset( data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) else: data_files = {} if data_args.train_file is not None: data_files["train"] = data_args.train_file extension = data_args.train_file.split(".")[-1] if data_args.validation_file is not None: data_files["validation"] = data_args.validation_file extension = data_args.validation_file.split(".")[-1] if data_args.test_file is not None: data_files["test"] = data_args.test_file extension = data_args.test_file.split(".")[-1] raw_datasets = load_dataset( extension, data_files=data_files, cache_dir=model_args.cache_dir, token=model_args.token, ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets. 
# endregion # region Load model config and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) prefix = data_args.source_prefix if data_args.source_prefix is not None else "" # endregion # region Dataset preprocessing # We need to tokenize inputs and targets. if training_args.do_train: column_names = raw_datasets["train"].column_names elif training_args.do_eval: column_names = raw_datasets["validation"].column_names else: logger.info("There is nothing to do. Please pass `do_train`, and/or `do_eval`.") return # Get the column names for input/target. dataset_columns = summarization_name_mapping.get(data_args.dataset_name, None) if data_args.text_column is None: text_column = dataset_columns[0] if dataset_columns is not None else column_names[0] else: text_column = data_args.text_column if text_column not in column_names: raise ValueError( f"--text_column' value '{data_args.text_column}' needs to be one of: {', '.join(column_names)}" ) if data_args.summary_column is None: summary_column = dataset_columns[1] if dataset_columns is not None else column_names[1] else: summary_column = data_args.summary_column if summary_column not in column_names: raise ValueError( f"--summary_column' value '{data_args.summary_column}' needs to be one of: {', '.join(column_names)}" ) # Temporarily set max_target_length for training. max_target_length = data_args.max_target_length padding = "max_length" if data_args.pad_to_max_length else False def preprocess_function(examples): inputs = examples[text_column] targets = examples[summary_column] inputs = [prefix + inp for inp in inputs] model_inputs = tokenizer(inputs, max_length=data_args.max_source_length, padding=padding, truncation=True) # Tokenize targets with the `text_target` keyword argument labels = tokenizer(text_target=targets, max_length=max_target_length, padding=padding, truncation=True) # If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore # padding in the loss. 
if padding == "max_length" and data_args.ignore_pad_token_for_loss: labels["input_ids"] = [ [(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"] ] model_inputs["labels"] = labels["input_ids"] return model_inputs if training_args.do_train: if "train" not in raw_datasets: raise ValueError("--do_train requires a train dataset") train_dataset = raw_datasets["train"] if data_args.max_train_samples is not None: max_train_samples = min(len(train_dataset), data_args.max_train_samples) train_dataset = train_dataset.select(range(max_train_samples)) train_dataset = train_dataset.map( preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on train dataset", ) else: train_dataset = None if training_args.do_eval: max_target_length = data_args.val_max_target_length if "validation" not in raw_datasets: raise ValueError("--do_eval requires a validation dataset") eval_dataset = raw_datasets["validation"] if data_args.max_eval_samples is not None: max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples) eval_dataset = eval_dataset.select(range(max_eval_samples)) eval_dataset = eval_dataset.map( preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on validation dataset", ) else: eval_dataset = None # endregion # region Text preprocessing def postprocess_text(preds, labels): preds = [pred.strip() for pred in preds] labels = [label.strip() for label in labels] # rougeLSum expects newline after each sentence preds = ["\n".join(nltk.sent_tokenize(pred)) for pred in preds] labels = ["\n".join(nltk.sent_tokenize(label)) for label in labels] return preds, labels # endregion with training_args.strategy.scope(): # region Prepare model model = TFAutoModelForSeq2SeqLM.from_pretrained( model_args.model_name_or_path, config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) # We resize the embeddings only when necessary to avoid index errors. If you are creating a model from scratch # on a small vocab and want a smaller embedding size, remove this test. embeddings = model.get_input_embeddings() # Matt: This is a temporary workaround as we transition our models to exclusively using Keras embeddings. # As soon as the transition is complete, all embeddings should be keras.Embeddings layers, and # the weights will always be in embeddings.embeddings. 
if hasattr(embeddings, "embeddings"): embedding_size = embeddings.embeddings.shape[0] else: embedding_size = embeddings.weight.shape[0] if len(tokenizer) > embedding_size: model.resize_token_embeddings(len(tokenizer)) # endregion # region Prepare TF Dataset objects if model.config.decoder_start_token_id is None: raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined") label_pad_token_id = -100 if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id data_collator = DataCollatorForSeq2Seq( tokenizer, model=model, label_pad_token_id=label_pad_token_id, pad_to_multiple_of=128, # Reduce the number of unique shapes for XLA, especially for generation return_tensors="np", ) dataset_options = tf.data.Options() dataset_options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF num_replicas = training_args.strategy.num_replicas_in_sync total_train_batch_size = training_args.per_device_train_batch_size * num_replicas total_eval_batch_size = training_args.per_device_eval_batch_size * num_replicas # model.prepare_tf_dataset() wraps a Hugging Face dataset in a tf.data.Dataset which is ready to use in # training. This is the recommended way to use a Hugging Face dataset when training with Keras. You can also # use the lower-level dataset.to_tf_dataset() method, but you will have to specify things like column names # yourself if you use this method, whereas they are automatically inferred from the model input names when # using model.prepare_tf_dataset() # For more info see the docs: # https://huggingface.co/docs/transformers/main/en/main_classes/model#transformers.TFPreTrainedModel.prepare_tf_dataset # https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset.to_tf_dataset tf_train_dataset = model.prepare_tf_dataset( train_dataset, collate_fn=data_collator, batch_size=total_train_batch_size, shuffle=True, ).with_options(dataset_options) tf_eval_dataset = model.prepare_tf_dataset( eval_dataset, collate_fn=data_collator, batch_size=total_eval_batch_size, shuffle=False, ).with_options(dataset_options) # endregion # region Optimizer, loss and LR scheduling num_train_steps = int(len(tf_train_dataset) * training_args.num_train_epochs) if training_args.warmup_steps > 0: num_warmup_steps = training_args.warmup_steps elif training_args.warmup_ratio > 0: num_warmup_steps = int(num_train_steps * training_args.warmup_ratio) else: num_warmup_steps = 0 if training_args.do_train: optimizer, lr_schedule = create_optimizer( init_lr=training_args.learning_rate, num_train_steps=num_train_steps, num_warmup_steps=num_warmup_steps, adam_beta1=training_args.adam_beta1, adam_beta2=training_args.adam_beta2, adam_epsilon=training_args.adam_epsilon, weight_decay_rate=training_args.weight_decay, adam_global_clipnorm=training_args.max_grad_norm, ) else: optimizer = "sgd" # Just write anything because we won't be using it # endregion # region Metric and KerasMetricCallback if training_args.do_eval: metric = evaluate.load("rouge", cache_dir=model_args.cache_dir) if data_args.val_max_target_length is None: data_args.val_max_target_length = data_args.max_target_length gen_kwargs = { "max_length": data_args.val_max_target_length if data_args is not None else config.max_length, "num_beams": data_args.num_beams, "no_repeat_ngram_size": 0, # Not supported under XLA right now, and some models set it by default } def compute_metrics(preds): predictions, labels = preds if isinstance(predictions, tuple): predictions = predictions[0] 
decoded_preds = tokenizer.batch_decode(predictions, skip_special_tokens=True) labels = np.where(labels != -100, labels, tokenizer.pad_token_id) decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True) decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels) metrics = metric.compute(predictions=decoded_preds, references=decoded_labels, use_stemmer=True) # Only print the mid f-measures, but there are a lot of other statistics in there too! metrics = {key: round(val.mid.fmeasure * 100, 4) for key, val in metrics.items()} return metrics # The KerasMetricCallback allows metrics that are too complex to write as standard Keras metrics # to be computed each epoch. Any Python code can be included in the metric_fn. This is especially # useful for metrics like BLEU and ROUGE that perform string comparisons on decoded model outputs. # For more information, see the docs at # https://huggingface.co/docs/transformers/main_classes/keras_callbacks#transformers.KerasMetricCallback metric_callback = KerasMetricCallback( metric_fn=compute_metrics, eval_dataset=tf_eval_dataset, predict_with_generate=True, use_xla_generation=True, generate_kwargs=gen_kwargs, ) callbacks = [metric_callback] else: callbacks = [] # endregion # region Preparing push_to_hub and model card push_to_hub_model_id = training_args.push_to_hub_model_id model_name = model_args.model_name_or_path.split("/")[-1] if not push_to_hub_model_id: if data_args.dataset_name is not None: push_to_hub_model_id = f"{model_name}-finetuned-{data_args.dataset_name}" else: push_to_hub_model_id = f"{model_name}-finetuned-summarization" model_card_kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "summarization"} if data_args.dataset_name is not None: model_card_kwargs["dataset_tags"] = data_args.dataset_name if data_args.dataset_config_name is not None: model_card_kwargs["dataset_args"] = data_args.dataset_config_name model_card_kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}" else: model_card_kwargs["dataset"] = data_args.dataset_name if training_args.push_to_hub: # Because this training can be quite long, we save once per epoch. callbacks.append( PushToHubCallback( output_dir=training_args.output_dir, hub_model_id=push_to_hub_model_id, hub_token=training_args.push_to_hub_token, tokenizer=tokenizer, **model_card_kwargs, ) ) # endregion # region Training # Transformers models compute the right loss for their task by default when labels are passed, and will # use this for training unless you specify your own loss function in compile(). model.compile(optimizer=optimizer, jit_compile=training_args.xla) eval_metrics = None if training_args.do_train: logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num Epochs = {training_args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {training_args.per_device_train_batch_size}") logger.info(f" Total train batch size = {total_train_batch_size}") logger.info(f" Total optimization steps = {num_train_steps}") if training_args.xla and not data_args.pad_to_max_length: logger.warning( "XLA training may be slow at first when --pad_to_max_length is not set " "until all possible shapes have been compiled." 
) history = model.fit(tf_train_dataset, epochs=int(training_args.num_train_epochs), callbacks=callbacks) eval_metrics = {key: val[-1] for key, val in history.history.items()} # endregion # region Validation if training_args.do_eval and not training_args.do_train: # Do a standalone evaluation run logger.info("Evaluation...") # Compiling generation with XLA yields enormous speedups, see https://huggingface.co/blog/tf-xla-generate @tf.function(jit_compile=True) def generate(**kwargs): return model.generate(**kwargs) for batch, labels in tf_eval_dataset: batch.update(gen_kwargs) generated_tokens = generate(**batch) if isinstance(generated_tokens, tuple): generated_tokens = generated_tokens[0] decoded_preds = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True) labels = np.where(labels != -100, labels, tokenizer.pad_token_id) decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True) decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels) metric.add_batch(predictions=decoded_preds, references=decoded_labels) eval_metrics = metric.compute(use_stemmer=True) result = {key: round(val.mid.fmeasure * 100, 4) for key, val in eval_metrics.items()} logger.info(result) # endregion if training_args.output_dir is not None and eval_metrics is not None: output_eval_file = os.path.join(training_args.output_dir, "all_results.json") with open(output_eval_file, "w") as writer: writer.write(json.dumps(eval_metrics)) if training_args.output_dir is not None and not training_args.push_to_hub: # If we're not pushing to hub, at least save a local copy when we're done model.save_pretrained(training_args.output_dir) if __name__ == "__main__": main()
transformers/examples/tensorflow/summarization/run_summarization.py/0
{ "file_path": "transformers/examples/tensorflow/summarization/run_summarization.py", "repo_id": "transformers", "token_count": 13633 }
"""Quick benchmark script comparing eager, torch.compile and torch.jit.trace inference for DeBERTa."""

import time

import torch
from transformers import AutoModel, AutoTokenizer, pipeline

test_sentence = "Do you [MASK] the muffin man?"

# BERT fill-mask predictions, for comparison
bert = pipeline("fill-mask", model="bert-base-uncased")
print("\n".join(d["sequence"] for d in bert(test_sentence)))

# DeBERTa-v3 fill-mask predictions
deberta = pipeline("fill-mask", model="microsoft/deberta-v3-base", model_kwargs={"legacy": False})
print("\n".join(d["sequence"] for d in deberta(test_sentence)))

tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-v3-base")
tokenized_dict = tokenizer(["Is this working"], ["Not yet"], return_tensors="pt")

# Time three forward passes through the torch.compile-d model; the first call includes compilation.
deberta.model.forward = torch.compile(deberta.model.forward)
for _ in range(3):
    start = time.time()
    deberta.model(**tokenized_dict)
    end = time.time()
    print(end - start)

# Trace DeBERTa with TorchScript, then time the trace itself and five traced forward passes.
model = AutoModel.from_pretrained("microsoft/deberta-base")
model.config.return_dict = False
model.config.output_hidden_states = False
input_tuple = (tokenized_dict["input_ids"], tokenized_dict["attention_mask"])

start = time.time()
traced_model = torch.jit.trace(model, input_tuple)
end = time.time()
print(end - start)

for _ in range(5):
    start = time.time()
    traced_model(tokenized_dict["input_ids"], tokenized_dict["attention_mask"])
    end = time.time()
    print(end - start)

torch.jit.save(traced_model, "compiled_deberta.pt")

# my_script_module = torch.jit.script(model)
transformers/scripts/deberta_scrtipt.py/0
{ "file_path": "transformers/scripts/deberta_scrtipt.py", "repo_id": "transformers", "token_count": 695 }
# Copyright 2021 The HuggingFace Team, the AllenNLP library authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Script to close stale issue. Taken in part from the AllenNLP repository. https://github.com/allenai/allennlp. """ import os from datetime import datetime as dt import github.GithubException from github import Github LABELS_TO_EXEMPT = [ "good first issue", "good second issue", "good difficult issue", "feature request", "new model", "wip", ] def main(): g = Github(os.environ["GITHUB_TOKEN"]) repo = g.get_repo("huggingface/transformers") open_issues = repo.get_issues(state="open") for i, issue in enumerate(open_issues): print(i, issue) comments = sorted(list(issue.get_comments()), key=lambda i: i.created_at, reverse=True) last_comment = comments[0] if len(comments) > 0 else None if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and (dt.utcnow() - issue.updated_at.replace(tzinfo=None)).days > 7 and (dt.utcnow() - issue.created_at.replace(tzinfo=None)).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels()) ): # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.") try: issue.edit(state="closed") except github.GithubException as e: print("Couldn't close the issue:", repr(e)) elif ( (dt.utcnow() - issue.updated_at.replace(tzinfo=None)).days > 23 and (dt.utcnow() - issue.created_at.replace(tzinfo=None)).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels()) ): # print(f"Would add stale comment to {issue.number}") try: issue.create_comment( "This issue has been automatically marked as stale because it has not had " "recent activity. If you think this still needs to be addressed " "please comment on this thread.\n\nPlease note that issues that do not follow the " "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) " "are likely to be ignored." ) except github.GithubException as e: print("Couldn't create comment:", repr(e)) if __name__ == "__main__": main()
transformers/scripts/stale.py/0
{ "file_path": "transformers/scripts/stale.py", "repo_id": "transformers", "token_count": 1217 }
#!/usr/bin/env python # coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..utils import cached_file # docstyle-ignore CHAT_MESSAGE_PROMPT = """ Human: <<task>> Assistant: """ DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts" PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"} def download_prompt(prompt_or_repo_id, agent_name, mode="run"): """ Downloads and caches the prompt from a repo and returns it contents (if necessary). """ if prompt_or_repo_id is None: prompt_or_repo_id = DEFAULT_PROMPTS_REPO # prompt is considered a repo ID when it does not contain any kind of space if re.search("\\s", prompt_or_repo_id) is not None: return prompt_or_repo_id prompt_file = cached_file( prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name} ) with open(prompt_file, "r", encoding="utf-8") as f: return f.read() DEFAULT_CODE_SYSTEM_PROMPT = """You will be given a task to solve, your job is to come up with a series of simple commands in Python that will perform the task. To help you, I will give you access to a set of tools that you can use. Each tool is a Python function and has a description explaining the task it performs, the inputs it expects and the outputs it returns. You should first explain which tool you will use to perform the task and for what reason, then write the code in Python. Each instruction in Python should be a simple assignment. You can print intermediate results if it makes sense to do so. In the end, use tool 'final_answer' to return your answer, its argument will be what gets returned. You can use imports in your code, but only from the following list of modules: <<authorized_imports>> Be sure to provide a 'Code:' token, else the run will fail. Tools: <<tool_descriptions>> Examples: --- Task: "Answer the question in the variable `question` about the image stored in the variable `image`. The question is in French." Thought: I will use the following tools: `translator` to translate the question into English and then `image_qa` to answer the question on the input image. Code: ```py translated_question = translator(question=question, src_lang="French", tgt_lang="English") print(f"The translated question is {translated_question}.") answer = image_qa(image=image, question=translated_question) final_answer(f"The answer is {answer}") ```<end_action> --- Task: "Identify the oldest person in the `document` and create an image showcasing the result." Thought: I will use the following tools: `document_qa` to find the oldest person in the document, then `image_generator` to generate an image according to the answer. Code: ```py answer = document_qa(document, question="What is the oldest person?") print(f"The answer is {answer}.") image = image_generator(answer) final_answer(image) ```<end_action> --- Task: "Generate an image using the text given in the variable `caption`." 
Thought: I will use the following tool: `image_generator` to generate an image. Code: ```py image = image_generator(prompt=caption) final_answer(image) ```<end_action> --- Task: "Summarize the text given in the variable `text` and read it out loud." Thought: I will use the following tools: `summarizer` to create a summary of the input text, then `text_reader` to read it out loud. Code: ```py summarized_text = summarizer(text) print(f"Summary: {summarized_text}") audio_summary = text_reader(summarized_text) final_answer(audio_summary) ```<end_action> --- Task: "Answer the question in the variable `question` about the text in the variable `text`. Use the answer to generate an image." Thought: I will use the following tools: `text_qa` to create the answer, then `image_generator` to generate an image according to the answer. Code: ```py answer = text_qa(text=text, question=question) print(f"The answer is {answer}.") image = image_generator(answer) final_answer(image) ```<end_action> --- Task: "Caption the following `image`." Thought: I will use the following tool: `image_captioner` to generate a caption for the image. Code: ```py caption = image_captioner(image) final_answer(caption) ```<end_action> --- Above example were using tools that might not exist for you. You only have access to these Tools: <<tool_names>> Remember to make sure that variables you use are all defined. Be sure to provide a 'Code:\n```' sequence before the code and '```<end_action>' after, else you will get an error. DO NOT pass the arguments as a dict as in 'answer = ask_search_agent({'query': "What is the place where James Bond lives?"})', but use the arguments directly as in 'answer = ask_search_agent(query="What is the place where James Bond lives?")'. Now Begin! If you solve the task correctly, you will receive a reward of $1,000,000. """ DEFAULT_REACT_JSON_SYSTEM_PROMPT = """You are an expert assistant who can solve any task using JSON tool calls. You will be given a task to solve as best you can. To do so, you have been given access to the following tools: <<tool_names>> The way you use the tools is by specifying a json blob, ending with '<end_action>'. Specifically, this json should have an `action` key (name of the tool to use) and an `action_input` key (input to the tool). The $ACTION_JSON_BLOB should only contain a SINGLE action, do NOT return a list of multiple actions. It should be formatted in json. Do not try to escape special characters. Here is the template of a valid $ACTION_JSON_BLOB: { "action": $TOOL_NAME, "action_input": $INPUT }<end_action> Make sure to have the $INPUT as a dictionary in the right format for the tool you are using, and do not put variable names as input if you can find the right values. You should ALWAYS use the following format: Thought: you should always think about one action to take. Then use the action as follows: Action: $ACTION_JSON_BLOB Observation: the result of the action ... (this Thought/Action/Observation can repeat N times, you should take several steps when needed. The $ACTION_JSON_BLOB must only use a SINGLE action at a time.) You can use the result of the previous action as input for the next action. The observation will always be a string: it can represent a file, like "image_1.jpg". Then you can use it as input for the next action. You can do it for instance as follows: Observation: "image_1.jpg" Thought: I need to transform the image that I received in the previous observation to make it green. 
Action: { "action": "image_transformer", "action_input": {"image": "image_1.jpg"} }<end_action> To provide the final answer to the task, use an action blob with "action": "final_answer" tool. It is the only way to complete the task, else you will be stuck on a loop. So your final output should look like this: Action: { "action": "final_answer", "action_input": {"answer": "insert your final answer here"} }<end_action> Here are a few examples using notional tools: --- Task: "Generate an image of the oldest person in this document." Thought: I will proceed step by step and use the following tools: `document_qa` to find the oldest person in the document, then `image_generator` to generate an image according to the answer. Action: { "action": "document_qa", "action_input": {"document": "document.pdf", "question": "Who is the oldest person mentioned?"} }<end_action> Observation: "The oldest person in the document is John Doe, a 55 year old lumberjack living in Newfoundland." Thought: I will now generate an image showcasing the oldest person. Action: { "action": "image_generator", "action_input": {"prompt": "A portrait of John Doe, a 55-year-old man living in Canada."} }<end_action> Observation: "image.png" Thought: I will now return the generated image. Action: { "action": "final_answer", "action_input": "image.png" }<end_action> --- Task: "What is the result of the following operation: 5 + 3 + 1294.678?" Thought: I will use python code evaluator to compute the result of the operation and then return the final answer using the `final_answer` tool Action: { "action": "python_interpreter", "action_input": {"code": "5 + 3 + 1294.678"} }<end_action> Observation: 1302.678 Thought: Now that I know the result, I will now return it. Action: { "action": "final_answer", "action_input": "1302.678" }<end_action> --- Task: "Which city has the highest population , Guangzhou or Shanghai?" Thought: I need to get the populations for both cities and compare them: I will use the tool `search` to get the population of both cities. Action: { "action": "search", "action_input": "Population Guangzhou" }<end_action> Observation: ['Guangzhou has a population of 15 million inhabitants as of 2021.'] Thought: Now let's get the population of Shanghai using the tool 'search'. Action: { "action": "search", "action_input": "Population Shanghai" } Observation: '26 million (2019)' Thought: Now I know that Shanghai has a larger population. Let's return the result. Action: { "action": "final_answer", "action_input": "Shanghai" }<end_action> Above example were using notional tools that might not exist for you. You only have access to these tools: <<tool_descriptions>> Here are the rules you should always follow to solve your task: 1. ALWAYS provide a 'Thought:' sequence, and an 'Action:' sequence that ends with <end_action>, else you will fail. 2. Always use the right arguments for the tools. Never use variable names in the 'action_input' field, use the value instead. 3. Call a tool only when needed: do not call the search agent if you do not need information, try to solve the task yourself. 4. Never re-do a tool call that you previously did with the exact same parameters. Now Begin! If you solve the task correctly, you will receive a reward of $1,000,000. """ DEFAULT_REACT_CODE_SYSTEM_PROMPT = """You are an expert assistant who can solve any task using code blobs. You will be given a task to solve as best you can. 
To do so, you have been given access to a list of tools: these tools are basically Python functions which you can call with code. To solve the task, you must plan forward to proceed in a series of steps, in a cycle of 'Thought:', 'Code:', and 'Observation:' sequences. At each step, in the 'Thought:' sequence, you should first explain your reasoning towards solving the task and the tools that you want to use. Then in the 'Code:' sequence, you should write the code in simple Python. The code sequence must end with '<end_action>' sequence. During each intermediate step, you can use 'print()' to save whatever important information you will then need. These print outputs will then appear in the 'Observation:' field, which will be available as input for the next step. In the end you have to return a final answer using the `final_answer` tool. Here are a few examples using notional tools: --- Task: "Generate an image of the oldest person in this document." Thought: I will proceed step by step and use the following tools: `document_qa` to find the oldest person in the document, then `image_generator` to generate an image according to the answer. Code: ```py answer = document_qa(document=document, question="Who is the oldest person mentioned?") print(answer) ```<end_action> Observation: "The oldest person in the document is John Doe, a 55 year old lumberjack living in Newfoundland." Thought: I will now generate an image showcasing the oldest person. Code: ```py image = image_generator("A portrait of John Doe, a 55-year-old man living in Canada.") final_answer(image) ```<end_action> --- Task: "What is the result of the following operation: 5 + 3 + 1294.678?" Thought: I will use python code to compute the result of the operation and then return the final answer using the `final_answer` tool Code: ```py result = 5 + 3 + 1294.678 final_answer(result) ```<end_action> --- Task: "Which city has the highest population: Guangzhou or Shanghai?" Thought: I need to get the populations for both cities and compare them: I will use the tool `search` to get the population of both cities. Code: ```py population_guangzhou = search("Guangzhou population") print("Population Guangzhou:", population_guangzhou) population_shanghai = search("Shanghai population") print("Population Shanghai:", population_shanghai) ```<end_action> Observation: Population Guangzhou: ['Guangzhou has a population of 15 million inhabitants as of 2021.'] Population Shanghai: '26 million (2019)' Thought: Now I know that Shanghai has the highest population. Code: ```py final_answer("Shanghai") ```<end_action> --- Task: "What is the current age of the pope, raised to the power 0.36?" Thought: I will use the tool `wiki` to get the age of the pope, then raise it to the power 0.36. Code: ```py pope_age = wiki(query="current pope age") print("Pope age:", pope_age) ```<end_action> Observation: Pope age: "The pope Francis is currently 85 years old." Thought: I know that the pope is 85 years old. Let's compute the result using python code. Code: ```py pope_current_age = 85 ** 0.36 final_answer(pope_current_age) ```<end_action> Above example were using notional tools that might not exist for you. On top of performing computations in the Python code snippets that you create, you have access to these tools (and no other tool): <<tool_descriptions>> <<managed_agents_descriptions>> Here are the rules you should always follow to solve your task: 1. 
Always provide a 'Thought:' sequence, and a 'Code:\n```py' sequence ending with '```<end_action>' sequence, else you will fail. 2. Use only variables that you have defined! 3. Always use the right arguments for the tools. DO NOT pass the arguments as a dict as in 'answer = wiki({'query': "What is the place where James Bond lives?"})', but use the arguments directly as in 'answer = wiki(query="What is the place where James Bond lives?")'. 4. Take care to not chain too many sequential tool calls in the same code block, especially when the output format is unpredictable. For instance, a call to search has an unpredictable return format, so do not have another tool call that depends on its output in the same block: rather output results with print() to use them in the next block. 5. Call a tool only when needed, and never re-do a tool call that you previously did with the exact same parameters. 6. Don't name any new variable with the same name as a tool: for instance don't name a variable 'final_answer'. 7. Never create any notional variables in our code, as having these in your logs might derail you from the true variables. 8. You can use imports in your code, but only from the following list of modules: <<authorized_imports>> 9. The state persists between code executions: so if in one step you've created variables or imported modules, these will all persist. 10. Don't give up! You're in charge of solving the task, not providing directions to solve it. Now Begin! If you solve the task correctly, you will receive a reward of $1,000,000. """ SYSTEM_PROMPT_FACTS = """Below I will present you a task. You will now build a comprehensive preparatory survey of which facts we have at our disposal and which ones we still need. To do so, you will have to read the task and identify things that must be discovered in order to successfully complete it. Don't make any assumptions. For each item, provide a thorough reasoning. Here is how you will structure this survey: --- ### 1. Facts given in the task List here the specific facts given in the task that could help you (there might be nothing here). ### 2. Facts to look up List here any facts that we may need to look up. Also list where to find each of these, for instance a website, a file... - maybe the task contains some sources that you should re-use here. ### 3. Facts to derive List here anything that we want to derive from the above by logical reasoning, for instance computation or simulation. Keep in mind that "facts" will typically be specific names, dates, values, etc. Your answer should use the below headings: ### 1. Facts given in the task ### 2. Facts to look up ### 3. Facts to derive Do not add anything else.""" SYSTEM_PROMPT_PLAN = """You are a world expert at making efficient plans to solve any task using a set of carefully crafted tools. Now for the given task, develop a step-by-step high-level plan taking into account the above inputs and list of facts. This plan should involve individual tasks based on the available tools, that if executed correctly will yield the correct answer. Do not skip steps, do not add any superfluous steps. Only write the high-level plan, DO NOT DETAIL INDIVIDUAL TOOL CALLS. After writing the final step of the plan, write the '\n<end_plan>' tag and stop there.""" USER_PROMPT_PLAN = """ Here is your task: Task: ``` {task} ``` Your plan can leverage any of these tools: {tool_descriptions} {managed_agents_descriptions} List of facts that you know: ``` {answer_facts} ``` Now begin! 
Write your plan below.""" SYSTEM_PROMPT_FACTS_UPDATE = """ You are a world expert at gathering known and unknown facts based on a conversation. Below you will find a task, and ahistory of attempts made to solve the task. You will have to produce a list of these: ### 1. Facts given in the task ### 2. Facts that we have learned ### 3. Facts still to look up ### 4. Facts still to derive Find the task and history below.""" USER_PROMPT_FACTS_UPDATE = """Earlier we've built a list of facts. But since in your previous steps you may have learned useful new facts or invalidated some false ones. Please update your list of facts based on the previous history, and provide these headings: ### 1. Facts given in the task ### 2. Facts that we have learned ### 3. Facts still to look up ### 4. Facts still to derive Now write your new list of facts below.""" SYSTEM_PROMPT_PLAN_UPDATE = """You are a world expert at making efficient plans to solve any task using a set of carefully crafted tools. You have been given a task: ``` {task} ``` Find below the record of what has been tried so far to solve it. Then you will be asked to make an updated plan to solve the task. If the previous tries so far have met some success, you can make an updated plan based on these actions. If you are stalled, you can make a completely new plan starting from scratch. """ USER_PROMPT_PLAN_UPDATE = """You're still working towards solving this task: ``` {task} ``` You have access to these tools and only these: {tool_descriptions} {managed_agents_descriptions} Here is the up to date list of facts that you know: ``` {facts_update} ``` Now for the given task, develop a step-by-step high-level plan taking into account the above inputs and list of facts. This plan should involve individual tasks based on the available tools, that if executed correctly will yield the correct answer. Beware that you have {remaining_steps} steps remaining. Do not skip steps, do not add any superfluous steps. Only write the high-level plan, DO NOT DETAIL INDIVIDUAL TOOL CALLS. After writing the final step of the plan, write the '\n<end_plan>' tag and stop there. Now write your new plan below.""" SYSTEM_PROMPT_PLAN_STRUCTURED = """Output a step-by-step plan to solve the task using the given tools. This plan should involve individual tasks based on the available tools, that if executed correctly will yield the correct answer. Each step should be structured as follows: Step #n: { "description": <description of what the step does and its output> "tool": <tool to use>, "params": { <parameters to pass to the tool as a valid dict> } "output_var": <output variable name> } Each step must be necessary to reach the final answer. Steps should reuse outputs produced by earlier steps. The last step must be the final answer. Below are some examples: Example 1: ------ Inputs: --- Task: How many encoder blocks were in the first attention-only ML architecture published? [FACTS LIST]: ### 1. Facts given in the task - The paper first introduced an attention-only ML architecture. - The specific information required is the page number where the number of encoder blocks is stated. - No local files are provided for access. ### 2. Facts to look up - The title and authors of the paper that first introduced an attention-only ML architecture. - Source: Online search (e.g., Google Scholar, arXiv, or other academic databases) - The full text of the identified paper. 
- Source: Online academic repositories (e.g., arXiv, journal websites) - The specific page number in the paper where the number of encoder blocks is mentioned. - Source: The content of the identified paper ### 3. Facts to derive - By identifying the correct paper and locating the specific page, we will derive the page number where the number of encoder blocks is stated. - Logical steps: Identify the correct paper, access its content, search for the term "encoder blocks," and note the page number where this information is found. ``` [STEP 1 TOOL CALL]: {'tool_name': 'code interpreter', 'tool_arguments': '# Step 1: Identify the title and authors of the paper that first introduced an attention-only ML architecture.\nanswer = ask_search_agent(query="Can you find the title and authors of the paper that first introduced an attention-only machine learning architecture? Please provide the full citation.")\nprint(answer)'} [OUTPUT OF STEP 1] Observation: **Title**: Attention Is All You Need **Authors**: Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, Illia Polosukhin [STEP 2 TOOL CALL]: {'tool_name': 'code interpreter', 'tool_arguments': '# Step 1: Find the full text of the identified paper on arXiv\\npaper_url = "https://arxiv.org/pdf/1706.03762.pdf"\\nprint(paper_url)'} [OUTPUT OF STEP 2] Observation: https://arxiv.org/pdf/1706.03762.pdf --- Output plan: --- Step #1: { "description": "Open the PDF of the paper from the provided URL and search within the text of the paper for the mention of "encoder blocks"", "tool": "inspect_file_as_text", "params": { "file_path": "https://arxiv.org/pdf/1706.03762.pdf", "question": "On which page is the number of encoder blocks mentioned?" }, "output_var": "page_number" } Step #2: { "description": "Provide the final answer", "tool": "final_answer", "params": { "answer": "{page_number}" }, "output_var": "" } ------ Example 2: ------ Inputs: --- Task: How many golf balls fits into a Boeing-747? [FACTS LIST]: ### 1. Facts given in the task - The task requires calculating the number of golf balls that fir into a Boeing-747 ### 2. Facts to look up - The volume of a golf ball - The volume of a Boeing-747 ### 3. Facts to derive - Once the volumes are known the final answer can be calculated --- Output plan: --- Step #1: { "description": "Find the volume of a Boeing-747", "tool": "web_search", "params": { "query": "What is the internal volume of a Boeing-747 in cubic meters?" }, "output_var": "boeing_volume" } Step #2: { "description": "Find the volume of a standard golf ball", "tool": "ask_search_agent", "params": { "query": "What is the volume of a standard golf ball in cubic centimeters?" }, "output_var": "golf_ball_volume" } Step #3: { "description": "Convert the volume of a golf ball from cubic centimeters to cubic meters. Calculate the number of golf balls that fit into the Boeing-747 by dividing the internal volume of the Boeing-747 by the volume of a golf ball.", "tool": "python_code", "params": { "code": "golf_ball_volume_m3 = golf_ball_volume / 1e6\nnumber_of_golf_balls = boeing_volume / golf_ball_volume_m3" }, "output_var": "number_of_golf_balls" } Step #4: { "description": "Provide the final answer", "tool": "final_answer", "params": { "answer": "{number_of_golf_balls}" }, "output_var": "" } ------ Above example were using tools that might not exist for you. 
Your goal is to create a plan to solve the task.""" USER_PROMPT_PLAN_STRUCTURED = """ Here are your inputs: Task: ``` {task} ``` Your plan can leverage any of these tools: {tool_descriptions} These tools are Python functions which you can call with code. You also have access to a Python interpreter so you can run Python code. List of facts that you know: ``` {answer_facts} ``` Now for the given task, create a plan taking into account the list of facts. After writing the final step of the plan, write the '\n<end_plan>' tag and stop there. Output the plan only and nothing else.""" SYSTEM_PROMPT_PLAN_UPDATE_STRUCTURED = """Output a step-by-step plan to solve the task using the given tools. This plan should involve individual tasks based on the available tools, that if executed correctly will yield the correct answer. Each step should be structured as follows: Step #n: {{ "description": <description of what the step does and its output> "tool": <tool to use>, "params": {{ <parameters to pass to the tool as a valid dict> }} "output_var": <output variable name> }} Each step must be necessary to reach the final answer. Steps should reuse outputs produced by earlier steps. The last step must be the final answer. Below are some examples: Example 1: ------ Inputs: --- Task: How many encoder blocks were in the first attention-only ML architecture published? [FACTS LIST]: ### 1. Facts given in the task - The paper first introduced an attention-only ML architecture. - The specific information required is the page number where the number of encoder blocks is stated. - No local files are provided for access. ### 2. Facts to look up - The title and authors of the paper that first introduced an attention-only ML architecture. - Source: Online search (e.g., Google Scholar, arXiv, or other academic databases) - The full text of the identified paper. - Source: Online academic repositories (e.g., arXiv, journal websites) - The specific page number in the paper where the number of encoder blocks is mentioned. - Source: The content of the identified paper ### 3. Facts to derive - By identifying the correct paper and locating the specific page, we will derive the page number where the number of encoder blocks is stated. - Logical steps: Identify the correct paper, access its content, search for the term "encoder blocks," and note the page number where this information is found. ``` [STEP 1 TOOL CALL]: {{'tool_name': 'code interpreter', 'tool_arguments': '# Step 1: Identify the title and authors of the paper that first introduced an attention-only ML architecture.\nanswer = ask_search_agent(query="Can you find the title and authors of the paper that first introduced an attention-only machine learning architecture? Please provide the full citation.")\nprint(answer)'}} [OUTPUT OF STEP 1] Observation: **Title**: Attention Is All You Need **Authors**: Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. 
Gomez, Lukasz Kaiser, Illia Polosukhin [STEP 2 TOOL CALL]: {{'tool_name': 'code interpreter', 'tool_arguments': '# Step 1: Find the full text of the identified paper on arXiv\\npaper_url = "https://arxiv.org/pdf/1706.03762.pdf"\\nprint(paper_url)'}} [OUTPUT OF STEP 2] Observation: https://arxiv.org/pdf/1706.03762.pdf --- Output plan: --- Step #1: {{ "description": "Open the PDF of the paper from the provided URL and search within the text of the paper for the mention of "encoder blocks"", "tool": "inspect_file_as_text", "params": {{ "file_path": "https://arxiv.org/pdf/1706.03762.pdf", "question": "On which page is the number of encoder blocks mentioned?" }}, "output_var": "page_number" }} Step #2: {{ "description": "Provide the final answer", "tool": "final_answer", "params": {{ "answer": "{{page_number}}" }}, "output_var": "" }} ------ Example 2: ------ Inputs: --- Task: How many golf balls fits into a Boeing-747? [FACTS LIST]: ### 1. Facts given in the task - The task requires calculating the number of golf balls that fir into a Boeing-747 ### 2. Facts to look up - The volume of a golf ball - The volume of a Boeing-747 ### 3. Facts to derive - Once the volumes are known the final answer can be calculated --- Output plan: --- Step #1: {{ "description": "Find the volume of a Boeing-747", "tool": "web_search", "params": {{ "query": "What is the internal volume of a Boeing-747 in cubic meters?" }}, "output_var": "boeing_volume" }} Step #2: {{ "description": "Find the volume of a standard golf ball", "tool": "ask_search_agent", "params": {{ "query": "What is the volume of a standard golf ball in cubic centimeters?" }}, "output_var": "golf_ball_volume" }} Step #3: {{ "description": "Convert the volume of a golf ball from cubic centimeters to cubic meters. Calculate the number of golf balls that fit into the Boeing-747 by dividing the internal volume of the Boeing-747 by the volume of a golf ball.", "tool": "python_code", "params": {{ "code": "golf_ball_volume_m3 = golf_ball_volume / 1e6\nnumber_of_golf_balls = boeing_volume / golf_ball_volume_m3" }}, "output_var": "number_of_golf_balls" }} Step #4: {{ "description": "Provide the final answer", "tool": "final_answer", "params": {{ "answer": "{{number_of_golf_balls}}" }}, "output_var": "" }} ------ Above example were using tools that might not exist for you. Find below the record of what has been tried so far to solve it. Your goal is to create an updated plan to solve the task.""" USER_PROMPT_PLAN_UPDATE_STRUCTURED = """ Here are your inputs: Task: ``` {task} ``` Your plan can leverage any of these tools: {tool_descriptions} These tools are Python functions which you can call with code. You also have access to a Python interpreter so you can run Python code. List of facts that you know: ``` {facts_update} ``` Now for the given task, create a plan taking into account the above inputs and list of facts. Beware that you have {remaining_steps} steps remaining. After writing the final step of the plan, write the '\n<end_plan>' tag and stop there. 
Output the plan only and nothing else.""" PLAN_UPDATE_FINAL_PLAN_REDACTION = """I still need to solve the task I was given: ``` {task} ``` Here is my new/updated plan of action to solve the task: ``` {plan_update} ```""" SUPPORTED_PLAN_TYPES = ["default", "structured"] PROMPTS_FOR_INITIAL_PLAN = { "default": {"system": SYSTEM_PROMPT_PLAN, "user": USER_PROMPT_PLAN}, "structured": {"system": SYSTEM_PROMPT_PLAN_STRUCTURED, "user": USER_PROMPT_PLAN_STRUCTURED}, } PROMPTS_FOR_PLAN_UPDATE = { "default": {"system": SYSTEM_PROMPT_PLAN_UPDATE, "user": USER_PROMPT_PLAN_UPDATE}, "structured": {"system": SYSTEM_PROMPT_PLAN_UPDATE_STRUCTURED, "user": USER_PROMPT_PLAN_UPDATE_STRUCTURED}, }
transformers/src/transformers/agents/prompts.py/0
{ "file_path": "transformers/src/transformers/agents/prompts.py", "repo_id": "transformers", "token_count": 9105 }
""" Implementation of a custom transfer agent for the transfer type "multipart" for git-lfs. Inspired by: github.com/cbartz/git-lfs-swift-transfer-agent/blob/master/git_lfs_swift_transfer.py Spec is: github.com/git-lfs/git-lfs/blob/master/docs/custom-transfers.md To launch debugger while developing: ``` [lfs "customtransfer.multipart"] path = /path/to/transformers/.env/bin/python args = -m debugpy --listen 5678 --wait-for-client /path/to/transformers/src/transformers/commands/transformers_cli.py lfs-multipart-upload ```""" import json import os import subprocess import sys import warnings from argparse import ArgumentParser from contextlib import AbstractContextManager from typing import Dict, List, Optional import requests from ..utils import logging from . import BaseTransformersCLICommand logger = logging.get_logger(__name__) # pylint: disable=invalid-name LFS_MULTIPART_UPLOAD_COMMAND = "lfs-multipart-upload" class LfsCommands(BaseTransformersCLICommand): """ Implementation of a custom transfer agent for the transfer type "multipart" for git-lfs. This lets users upload large files >5GB 🔥. Spec for LFS custom transfer agent is: https://github.com/git-lfs/git-lfs/blob/master/docs/custom-transfers.md This introduces two commands to the CLI: 1. $ transformers-cli lfs-enable-largefiles This should be executed once for each model repo that contains a model file >5GB. It's documented in the error message you get if you just try to git push a 5GB file without having enabled it before. 2. $ transformers-cli lfs-multipart-upload This command is called by lfs directly and is not meant to be called by the user. """ @staticmethod def register_subcommand(parser: ArgumentParser): enable_parser = parser.add_parser( "lfs-enable-largefiles", help=( "Deprecated: use `huggingface-cli` instead. Configure your repository to enable upload of files > 5GB." ), ) enable_parser.add_argument("path", type=str, help="Local path to repository you want to configure.") enable_parser.set_defaults(func=lambda args: LfsEnableCommand(args)) upload_parser = parser.add_parser( LFS_MULTIPART_UPLOAD_COMMAND, help=( "Deprecated: use `huggingface-cli` instead. " "Command will get called by git-lfs, do not call it directly." ), ) upload_parser.set_defaults(func=lambda args: LfsUploadCommand(args)) class LfsEnableCommand: def __init__(self, args): self.args = args def run(self): warnings.warn( "Managing repositories through transformers-cli is deprecated. Please use `huggingface-cli` instead." 
) local_path = os.path.abspath(self.args.path) if not os.path.isdir(local_path): print("This does not look like a valid git repo.") exit(1) subprocess.run( "git config lfs.customtransfer.multipart.path transformers-cli".split(), check=True, cwd=local_path ) subprocess.run( f"git config lfs.customtransfer.multipart.args {LFS_MULTIPART_UPLOAD_COMMAND}".split(), check=True, cwd=local_path, ) print("Local repo set up for largefiles") def write_msg(msg: Dict): """Write out the message in Line delimited JSON.""" msg = json.dumps(msg) + "\n" sys.stdout.write(msg) sys.stdout.flush() def read_msg() -> Optional[Dict]: """Read Line delimited JSON from stdin.""" msg = json.loads(sys.stdin.readline().strip()) if "terminate" in (msg.get("type"), msg.get("event")): # terminate message received return None if msg.get("event") not in ("download", "upload"): logger.critical("Received unexpected message") sys.exit(1) return msg class FileSlice(AbstractContextManager): """ File-like object that only reads a slice of a file Inspired by stackoverflow.com/a/29838711/593036 """ def __init__(self, filepath: str, seek_from: int, read_limit: int): self.filepath = filepath self.seek_from = seek_from self.read_limit = read_limit self.n_seen = 0 def __enter__(self): self.f = open(self.filepath, "rb") self.f.seek(self.seek_from) return self def __len__(self): total_length = os.fstat(self.f.fileno()).st_size return min(self.read_limit, total_length - self.seek_from) def read(self, n=-1): if self.n_seen >= self.read_limit: return b"" remaining_amount = self.read_limit - self.n_seen data = self.f.read(remaining_amount if n < 0 else min(n, remaining_amount)) self.n_seen += len(data) return data def __iter__(self): yield self.read(n=4 * 1024 * 1024) def __exit__(self, *args): self.f.close() class LfsUploadCommand: def __init__(self, args): self.args = args def run(self): # Immediately after invoking a custom transfer process, git-lfs # sends initiation data to the process over stdin. # This tells the process useful information about the configuration. init_msg = json.loads(sys.stdin.readline().strip()) if not (init_msg.get("event") == "init" and init_msg.get("operation") == "upload"): write_msg({"error": {"code": 32, "message": "Wrong lfs init operation"}}) sys.exit(1) # The transfer process should use the information it needs from the # initiation structure, and also perform any one-off setup tasks it # needs to do. It should then respond on stdout with a simple empty # confirmation structure, as follows: write_msg({}) # After the initiation exchange, git-lfs will send any number of # transfer requests to the stdin of the transfer process, in a serial sequence. while True: msg = read_msg() if msg is None: # When all transfers have been processed, git-lfs will send # a terminate event to the stdin of the transfer process. # On receiving this message the transfer process should # clean up and terminate. No response is expected. 
sys.exit(0) oid = msg["oid"] filepath = msg["path"] completion_url = msg["action"]["href"] header = msg["action"]["header"] chunk_size = int(header.pop("chunk_size")) presigned_urls: List[str] = list(header.values()) parts = [] for i, presigned_url in enumerate(presigned_urls): with FileSlice(filepath, seek_from=i * chunk_size, read_limit=chunk_size) as data: r = requests.put(presigned_url, data=data) r.raise_for_status() parts.append( { "etag": r.headers.get("etag"), "partNumber": i + 1, } ) # In order to support progress reporting while data is uploading / downloading, # the transfer process should post messages to stdout write_msg( { "event": "progress", "oid": oid, "bytesSoFar": (i + 1) * chunk_size, "bytesSinceLast": chunk_size, } ) # Not precise but that's ok. r = requests.post( completion_url, json={ "oid": oid, "parts": parts, }, ) r.raise_for_status() write_msg({"event": "complete", "oid": oid})
transformers/src/transformers/commands/lfs.py/0
{ "file_path": "transformers/src/transformers/commands/lfs.py", "repo_id": "transformers", "token_count": 3515 }
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import pickle import random import time import warnings from typing import Dict, List, Optional import torch from filelock import FileLock from torch.utils.data import Dataset from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging logger = logging.get_logger(__name__) DEPRECATION_WARNING = ( "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets " "library. You can have a look at this example script for pointers: {0}" ) class TextDataset(Dataset): """ This will be superseded by a framework-agnostic approach soon. """ def __init__( self, tokenizer: PreTrainedTokenizer, file_path: str, block_size: int, overwrite_cache=False, cache_dir: Optional[str] = None, ): warnings.warn( DEPRECATION_WARNING.format( "https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_mlm.py" ), FutureWarning, ) if os.path.isfile(file_path) is False: raise ValueError(f"Input file path {file_path} not found") block_size = block_size - tokenizer.num_special_tokens_to_add(pair=False) directory, filename = os.path.split(file_path) cached_features_file = os.path.join( cache_dir if cache_dir is not None else directory, f"cached_lm_{tokenizer.__class__.__name__}_{block_size}_{filename}", ) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. lock_path = cached_features_file + ".lock" with FileLock(lock_path): if os.path.exists(cached_features_file) and not overwrite_cache: start = time.time() with open(cached_features_file, "rb") as handle: self.examples = pickle.load(handle) logger.info( f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start ) else: logger.info(f"Creating features from dataset file at {directory}") self.examples = [] with open(file_path, encoding="utf-8") as f: text = f.read() tokenized_text = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(text)) for i in range(0, len(tokenized_text) - block_size + 1, block_size): # Truncate in block of block_size self.examples.append( tokenizer.build_inputs_with_special_tokens(tokenized_text[i : i + block_size]) ) # Note that we are losing the last truncated example here for the sake of simplicity (no padding) # If your dataset is small, first you should look for a bigger one :-) and second you # can change this behavior by adding (model specific) padding. 
start = time.time() with open(cached_features_file, "wb") as handle: pickle.dump(self.examples, handle, protocol=pickle.HIGHEST_PROTOCOL) logger.info( f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]" ) def __len__(self): return len(self.examples) def __getitem__(self, i) -> torch.Tensor: return torch.tensor(self.examples[i], dtype=torch.long) class LineByLineTextDataset(Dataset): """ This will be superseded by a framework-agnostic approach soon. """ def __init__(self, tokenizer: PreTrainedTokenizer, file_path: str, block_size: int): warnings.warn( DEPRECATION_WARNING.format( "https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_mlm.py" ), FutureWarning, ) if os.path.isfile(file_path) is False: raise ValueError(f"Input file path {file_path} not found") # Here, we do not cache the features, operating under the assumption # that we will soon use fast multithreaded tokenizers from the # `tokenizers` repo everywhere =) logger.info(f"Creating features from dataset file at {file_path}") with open(file_path, encoding="utf-8") as f: lines = [line for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())] batch_encoding = tokenizer(lines, add_special_tokens=True, truncation=True, max_length=block_size) self.examples = batch_encoding["input_ids"] self.examples = [{"input_ids": torch.tensor(e, dtype=torch.long)} for e in self.examples] def __len__(self): return len(self.examples) def __getitem__(self, i) -> Dict[str, torch.tensor]: return self.examples[i] class LineByLineWithRefDataset(Dataset): """ This will be superseded by a framework-agnostic approach soon. """ def __init__(self, tokenizer: PreTrainedTokenizer, file_path: str, block_size: int, ref_path: str): warnings.warn( DEPRECATION_WARNING.format( "https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_mlm_wwm.py" ), FutureWarning, ) if os.path.isfile(file_path) is False: raise ValueError(f"Input file path {file_path} not found") if os.path.isfile(ref_path) is False: raise ValueError(f"Ref file path {file_path} not found") # Here, we do not cache the features, operating under the assumption # that we will soon use fast multithreaded tokenizers from the # `tokenizers` repo everywhere =) logger.info(f"Creating features from dataset file at {file_path}") logger.info(f"Use ref segment results at {ref_path}") with open(file_path, encoding="utf-8") as f: data = f.readlines() # use this method to avoid delimiter '\u2029' to split a line data = [line.strip() for line in data if len(line) > 0 and not line.isspace()] # Get ref inf from file with open(ref_path, encoding="utf-8") as f: ref = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())] if len(data) != len(ref): raise ValueError( f"Length of Input file should be equal to Ref file. 
But the length of {file_path} is {len(data)} " f"while length of {ref_path} is {len(ref)}" ) batch_encoding = tokenizer(data, add_special_tokens=True, truncation=True, max_length=block_size) self.examples = batch_encoding["input_ids"] self.examples = [{"input_ids": torch.tensor(e, dtype=torch.long)} for e in self.examples] n = len(self.examples) for i in range(n): self.examples[i]["chinese_ref"] = torch.tensor(ref[i], dtype=torch.long) def __len__(self): return len(self.examples) def __getitem__(self, i) -> Dict[str, torch.tensor]: return self.examples[i] class LineByLineWithSOPTextDataset(Dataset): """ Dataset for sentence order prediction task, prepare sentence pairs for SOP task """ def __init__(self, tokenizer: PreTrainedTokenizer, file_dir: str, block_size: int): warnings.warn( DEPRECATION_WARNING.format( "https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_mlm.py" ), FutureWarning, ) if os.path.isdir(file_dir) is False: raise ValueError(f"{file_dir} is not a directory") logger.info(f"Creating features from dataset file folder at {file_dir}") self.examples = [] # TODO: randomness could apply a random seed, ex. rng = random.Random(random_seed) # file path looks like ./dataset/wiki_1, ./dataset/wiki_2 for file_name in os.listdir(file_dir): file_path = os.path.join(file_dir, file_name) if os.path.isfile(file_path) is False: raise ValueError(f"{file_path} is not a file") article_open = False with open(file_path, encoding="utf-8") as f: original_lines = f.readlines() article_lines = [] for line in original_lines: if "<doc id=" in line: article_open = True elif "</doc>" in line: article_open = False document = [ tokenizer.convert_tokens_to_ids(tokenizer.tokenize(line)) for line in article_lines[1:] if (len(line) > 0 and not line.isspace()) ] examples = self.create_examples_from_document(document, block_size, tokenizer) self.examples.extend(examples) article_lines = [] else: if article_open: article_lines.append(line) logger.info("Dataset parse finished.") def create_examples_from_document(self, document, block_size, tokenizer, short_seq_prob=0.1): """Creates examples for a single document.""" # Account for special tokens max_num_tokens = block_size - tokenizer.num_special_tokens_to_add(pair=True) # We *usually* want to fill up the entire sequence since we are padding # to `block_size` anyways, so short sequences are generally wasted # computation. However, we *sometimes* # (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter # sequences to minimize the mismatch between pretraining and fine-tuning. # The `target_seq_length` is just a rough target however, whereas # `block_size` is a hard limit. target_seq_length = max_num_tokens if random.random() < short_seq_prob: target_seq_length = random.randint(2, max_num_tokens) # We DON'T just concatenate all of the tokens from a document into a long # sequence and choose an arbitrary split point because this would make the # next sentence prediction task too easy. Instead, we split the input into # segments "A" and "B" based on the actual "sentences" provided by the user # input. 
examples = [] current_chunk = [] # a buffer stored current working segments current_length = 0 i = 0 while i < len(document): segment = document[i] # get a segment if not segment: i += 1 continue current_chunk.append(segment) # add a segment to current chunk current_length += len(segment) # overall token length # if current length goes to the target length or reaches the end of file, start building token a and b if i == len(document) - 1 or current_length >= target_seq_length: if current_chunk: # `a_end` is how many segments from `current_chunk` go into the `A` (first) sentence. a_end = 1 # if current chunk has more than 2 sentences, pick part of it `A` (first) sentence if len(current_chunk) >= 2: a_end = random.randint(1, len(current_chunk) - 1) # token a tokens_a = [] for j in range(a_end): tokens_a.extend(current_chunk[j]) # token b tokens_b = [] for j in range(a_end, len(current_chunk)): tokens_b.extend(current_chunk[j]) if len(tokens_a) == 0 or len(tokens_b) == 0: continue # switch tokens_a and tokens_b randomly if random.random() < 0.5: is_next = False tokens_a, tokens_b = tokens_b, tokens_a else: is_next = True def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens): """Truncates a pair of sequences to a maximum sequence length.""" while True: total_length = len(tokens_a) + len(tokens_b) if total_length <= max_num_tokens: break trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b if not (len(trunc_tokens) >= 1): raise ValueError("Sequence length to be truncated must be no less than one") # We want to sometimes truncate from the front and sometimes from the # back to add more randomness and avoid biases. if random.random() < 0.5: del trunc_tokens[0] else: trunc_tokens.pop() truncate_seq_pair(tokens_a, tokens_b, max_num_tokens) if not (len(tokens_a) >= 1): raise ValueError(f"Length of sequence a is {len(tokens_a)} which must be no less than 1") if not (len(tokens_b) >= 1): raise ValueError(f"Length of sequence b is {len(tokens_b)} which must be no less than 1") # add special tokens input_ids = tokenizer.build_inputs_with_special_tokens(tokens_a, tokens_b) # add token type ids, 0 for sentence a, 1 for sentence b token_type_ids = tokenizer.create_token_type_ids_from_sequences(tokens_a, tokens_b) example = { "input_ids": torch.tensor(input_ids, dtype=torch.long), "token_type_ids": torch.tensor(token_type_ids, dtype=torch.long), "sentence_order_label": torch.tensor(0 if is_next else 1, dtype=torch.long), } examples.append(example) current_chunk = [] # clear current chunk current_length = 0 # reset current text length i += 1 # go to next line return examples def __len__(self): return len(self.examples) def __getitem__(self, i) -> Dict[str, torch.tensor]: return self.examples[i] class TextDatasetForNextSentencePrediction(Dataset): """ This will be superseded by a framework-agnostic approach soon. 
""" def __init__( self, tokenizer: PreTrainedTokenizer, file_path: str, block_size: int, overwrite_cache=False, short_seq_probability=0.1, nsp_probability=0.5, ): warnings.warn( DEPRECATION_WARNING.format( "https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_mlm.py" ), FutureWarning, ) if not os.path.isfile(file_path): raise ValueError(f"Input file path {file_path} not found") self.short_seq_probability = short_seq_probability self.nsp_probability = nsp_probability directory, filename = os.path.split(file_path) cached_features_file = os.path.join( directory, f"cached_nsp_{tokenizer.__class__.__name__}_{block_size}_{filename}", ) self.tokenizer = tokenizer # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. lock_path = cached_features_file + ".lock" # Input file format: # (1) One sentence per line. These should ideally be actual sentences, not # entire paragraphs or arbitrary spans of text. (Because we use the # sentence boundaries for the "next sentence prediction" task). # (2) Blank lines between documents. Document boundaries are needed so # that the "next sentence prediction" task doesn't span between documents. # # Example: # I am very happy. # Here is the second sentence. # # A new document. with FileLock(lock_path): if os.path.exists(cached_features_file) and not overwrite_cache: start = time.time() with open(cached_features_file, "rb") as handle: self.examples = pickle.load(handle) logger.info( f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start ) else: logger.info(f"Creating features from dataset file at {directory}") self.documents = [[]] with open(file_path, encoding="utf-8") as f: while True: line = f.readline() if not line: break line = line.strip() # Empty lines are used as document delimiters if not line and len(self.documents[-1]) != 0: self.documents.append([]) tokens = tokenizer.tokenize(line) tokens = tokenizer.convert_tokens_to_ids(tokens) if tokens: self.documents[-1].append(tokens) logger.info(f"Creating examples from {len(self.documents)} documents.") self.examples = [] for doc_index, document in enumerate(self.documents): self.create_examples_from_document(document, doc_index, block_size) start = time.time() with open(cached_features_file, "wb") as handle: pickle.dump(self.examples, handle, protocol=pickle.HIGHEST_PROTOCOL) logger.info( f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]" ) def create_examples_from_document(self, document: List[List[int]], doc_index: int, block_size: int): """Creates examples for a single document.""" max_num_tokens = block_size - self.tokenizer.num_special_tokens_to_add(pair=True) # We *usually* want to fill up the entire sequence since we are padding # to `block_size` anyways, so short sequences are generally wasted # computation. However, we *sometimes* # (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter # sequences to minimize the mismatch between pretraining and fine-tuning. # The `target_seq_length` is just a rough target however, whereas # `block_size` is a hard limit. 
target_seq_length = max_num_tokens if random.random() < self.short_seq_probability: target_seq_length = random.randint(2, max_num_tokens) current_chunk = [] # a buffer stored current working segments current_length = 0 i = 0 while i < len(document): segment = document[i] current_chunk.append(segment) current_length += len(segment) if i == len(document) - 1 or current_length >= target_seq_length: if current_chunk: # `a_end` is how many segments from `current_chunk` go into the `A` # (first) sentence. a_end = 1 if len(current_chunk) >= 2: a_end = random.randint(1, len(current_chunk) - 1) tokens_a = [] for j in range(a_end): tokens_a.extend(current_chunk[j]) tokens_b = [] if len(current_chunk) == 1 or random.random() < self.nsp_probability: is_random_next = True target_b_length = target_seq_length - len(tokens_a) # This should rarely go for more than one iteration for large # corpora. However, just to be careful, we try to make sure that # the random document is not the same as the document # we're processing. for _ in range(10): random_document_index = random.randint(0, len(self.documents) - 1) if random_document_index != doc_index: break random_document = self.documents[random_document_index] random_start = random.randint(0, len(random_document) - 1) for j in range(random_start, len(random_document)): tokens_b.extend(random_document[j]) if len(tokens_b) >= target_b_length: break # We didn't actually use these segments so we "put them back" so # they don't go to waste. num_unused_segments = len(current_chunk) - a_end i -= num_unused_segments # Actual next else: is_random_next = False for j in range(a_end, len(current_chunk)): tokens_b.extend(current_chunk[j]) if not (len(tokens_a) >= 1): raise ValueError(f"Length of sequence a is {len(tokens_a)} which must be no less than 1") if not (len(tokens_b) >= 1): raise ValueError(f"Length of sequence b is {len(tokens_b)} which must be no less than 1") # add special tokens input_ids = self.tokenizer.build_inputs_with_special_tokens(tokens_a, tokens_b) # add token type ids, 0 for sentence a, 1 for sentence b token_type_ids = self.tokenizer.create_token_type_ids_from_sequences(tokens_a, tokens_b) example = { "input_ids": torch.tensor(input_ids, dtype=torch.long), "token_type_ids": torch.tensor(token_type_ids, dtype=torch.long), "next_sentence_label": torch.tensor(1 if is_random_next else 0, dtype=torch.long), } self.examples.append(example) current_chunk = [] current_length = 0 i += 1 def __len__(self): return len(self.examples) def __getitem__(self, i): return self.examples[i]
transformers/src/transformers/data/datasets/language_modeling.py/0
{ "file_path": "transformers/src/transformers/data/datasets/language_modeling.py", "repo_id": "transformers", "token_count": 11449 }
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ..utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available _import_structure = { "configuration_utils": [ "BaseWatermarkingConfig", "CompileConfig", "GenerationConfig", "GenerationMode", "SynthIDTextWatermarkingConfig", "WatermarkingConfig", ], "streamers": ["AsyncTextIteratorStreamer", "TextIteratorStreamer", "TextStreamer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["beam_constraints"] = [ "Constraint", "ConstraintListState", "DisjunctiveConstraint", "PhrasalConstraint", ] _import_structure["beam_search"] = [ "BeamHypotheses", "BeamScorer", "BeamSearchScorer", "ConstrainedBeamSearchScorer", ] _import_structure["candidate_generator"] = [ "AssistedCandidateGenerator", "CandidateGenerator", "EarlyExitCandidateGenerator", "PromptLookupCandidateGenerator", ] _import_structure["logits_process"] = [ "AlternatingCodebooksLogitsProcessor", "ClassifierFreeGuidanceLogitsProcessor", "EncoderNoRepeatNGramLogitsProcessor", "EncoderRepetitionPenaltyLogitsProcessor", "EpsilonLogitsWarper", "EtaLogitsWarper", "ExponentialDecayLengthPenalty", "ForcedBOSTokenLogitsProcessor", "ForcedEOSTokenLogitsProcessor", "HammingDiversityLogitsProcessor", "InfNanRemoveLogitsProcessor", "LogitNormalization", "LogitsProcessor", "LogitsProcessorList", "MinLengthLogitsProcessor", "MinNewTokensLengthLogitsProcessor", "MinPLogitsWarper", "NoBadWordsLogitsProcessor", "NoRepeatNGramLogitsProcessor", "PrefixConstrainedLogitsProcessor", "RepetitionPenaltyLogitsProcessor", "SequenceBiasLogitsProcessor", "SuppressTokensLogitsProcessor", "SuppressTokensAtBeginLogitsProcessor", "SynthIDTextWatermarkLogitsProcessor", "TemperatureLogitsWarper", "TopKLogitsWarper", "TopPLogitsWarper", "TypicalLogitsWarper", "UnbatchedClassifierFreeGuidanceLogitsProcessor", "WhisperTimeStampLogitsProcessor", "WatermarkLogitsProcessor", ] _import_structure["stopping_criteria"] = [ "MaxLengthCriteria", "MaxTimeCriteria", "ConfidenceCriteria", "EosTokenCriteria", "StoppingCriteria", "StoppingCriteriaList", "validate_stopping_criteria", "StopStringCriteria", ] _import_structure["utils"] = [ "GenerationMixin", "GreedySearchEncoderDecoderOutput", "GreedySearchDecoderOnlyOutput", "SampleEncoderDecoderOutput", "SampleDecoderOnlyOutput", "BeamSearchEncoderDecoderOutput", "BeamSearchDecoderOnlyOutput", "BeamSampleEncoderDecoderOutput", "BeamSampleDecoderOnlyOutput", "ContrastiveSearchEncoderDecoderOutput", "ContrastiveSearchDecoderOnlyOutput", "GenerateBeamDecoderOnlyOutput", "GenerateBeamEncoderDecoderOutput", "GenerateDecoderOnlyOutput", "GenerateEncoderDecoderOutput", ] _import_structure["watermarking"] = [ "WatermarkDetector", "WatermarkDetectorOutput", "BayesianDetectorModel", "BayesianDetectorConfig", "SynthIDTextWatermarkDetector", ] try: if not is_tf_available(): raise 
OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["tf_logits_process"] = [ "TFForcedBOSTokenLogitsProcessor", "TFForcedEOSTokenLogitsProcessor", "TFForceTokensLogitsProcessor", "TFLogitsProcessor", "TFLogitsProcessorList", "TFLogitsWarper", "TFMinLengthLogitsProcessor", "TFNoBadWordsLogitsProcessor", "TFNoRepeatNGramLogitsProcessor", "TFRepetitionPenaltyLogitsProcessor", "TFSuppressTokensAtBeginLogitsProcessor", "TFSuppressTokensLogitsProcessor", "TFTemperatureLogitsWarper", "TFTopKLogitsWarper", "TFTopPLogitsWarper", ] _import_structure["tf_utils"] = [ "TFGenerationMixin", "TFGreedySearchDecoderOnlyOutput", "TFGreedySearchEncoderDecoderOutput", "TFSampleEncoderDecoderOutput", "TFSampleDecoderOnlyOutput", "TFBeamSearchEncoderDecoderOutput", "TFBeamSearchDecoderOnlyOutput", "TFBeamSampleEncoderDecoderOutput", "TFBeamSampleDecoderOnlyOutput", "TFContrastiveSearchEncoderDecoderOutput", "TFContrastiveSearchDecoderOnlyOutput", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["flax_logits_process"] = [ "FlaxForcedBOSTokenLogitsProcessor", "FlaxForcedEOSTokenLogitsProcessor", "FlaxForceTokensLogitsProcessor", "FlaxLogitsProcessor", "FlaxLogitsProcessorList", "FlaxLogitsWarper", "FlaxMinLengthLogitsProcessor", "FlaxSuppressTokensAtBeginLogitsProcessor", "FlaxSuppressTokensLogitsProcessor", "FlaxTemperatureLogitsWarper", "FlaxTopKLogitsWarper", "FlaxTopPLogitsWarper", "FlaxWhisperTimeStampLogitsProcessor", "FlaxNoRepeatNGramLogitsProcessor", ] _import_structure["flax_utils"] = [ "FlaxGenerationMixin", "FlaxGreedySearchOutput", "FlaxSampleOutput", "FlaxBeamSearchOutput", ] if TYPE_CHECKING: from .configuration_utils import ( BaseWatermarkingConfig, CompileConfig, GenerationConfig, GenerationMode, SynthIDTextWatermarkingConfig, WatermarkingConfig, ) from .streamers import AsyncTextIteratorStreamer, TextIteratorStreamer, TextStreamer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .beam_constraints import Constraint, ConstraintListState, DisjunctiveConstraint, PhrasalConstraint from .beam_search import BeamHypotheses, BeamScorer, BeamSearchScorer, ConstrainedBeamSearchScorer from .candidate_generator import ( AssistedCandidateGenerator, CandidateGenerator, EarlyExitCandidateGenerator, PromptLookupCandidateGenerator, ) from .logits_process import ( AlternatingCodebooksLogitsProcessor, ClassifierFreeGuidanceLogitsProcessor, EncoderNoRepeatNGramLogitsProcessor, EncoderRepetitionPenaltyLogitsProcessor, EpsilonLogitsWarper, EtaLogitsWarper, ExponentialDecayLengthPenalty, ForcedBOSTokenLogitsProcessor, ForcedEOSTokenLogitsProcessor, HammingDiversityLogitsProcessor, InfNanRemoveLogitsProcessor, LogitNormalization, LogitsProcessor, LogitsProcessorList, MinLengthLogitsProcessor, MinNewTokensLengthLogitsProcessor, MinPLogitsWarper, NoBadWordsLogitsProcessor, NoRepeatNGramLogitsProcessor, PrefixConstrainedLogitsProcessor, RepetitionPenaltyLogitsProcessor, SequenceBiasLogitsProcessor, SuppressTokensAtBeginLogitsProcessor, SuppressTokensLogitsProcessor, SynthIDTextWatermarkLogitsProcessor, TemperatureLogitsWarper, TopKLogitsWarper, TopPLogitsWarper, TypicalLogitsWarper, UnbatchedClassifierFreeGuidanceLogitsProcessor, WatermarkLogitsProcessor, WhisperTimeStampLogitsProcessor, ) from .stopping_criteria import ( ConfidenceCriteria, EosTokenCriteria, MaxLengthCriteria, MaxTimeCriteria, 
StoppingCriteria, StoppingCriteriaList, StopStringCriteria, validate_stopping_criteria, ) from .utils import ( BeamSampleDecoderOnlyOutput, BeamSampleEncoderDecoderOutput, BeamSearchDecoderOnlyOutput, BeamSearchEncoderDecoderOutput, ContrastiveSearchDecoderOnlyOutput, ContrastiveSearchEncoderDecoderOutput, GenerateBeamDecoderOnlyOutput, GenerateBeamEncoderDecoderOutput, GenerateDecoderOnlyOutput, GenerateEncoderDecoderOutput, GenerationMixin, GreedySearchDecoderOnlyOutput, GreedySearchEncoderDecoderOutput, SampleDecoderOnlyOutput, SampleEncoderDecoderOutput, ) from .watermarking import ( BayesianDetectorConfig, BayesianDetectorModel, SynthIDTextWatermarkDetector, WatermarkDetector, WatermarkDetectorOutput, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tf_logits_process import ( TFForcedBOSTokenLogitsProcessor, TFForcedEOSTokenLogitsProcessor, TFForceTokensLogitsProcessor, TFLogitsProcessor, TFLogitsProcessorList, TFLogitsWarper, TFMinLengthLogitsProcessor, TFNoBadWordsLogitsProcessor, TFNoRepeatNGramLogitsProcessor, TFRepetitionPenaltyLogitsProcessor, TFSuppressTokensAtBeginLogitsProcessor, TFSuppressTokensLogitsProcessor, TFTemperatureLogitsWarper, TFTopKLogitsWarper, TFTopPLogitsWarper, ) from .tf_utils import ( TFBeamSampleDecoderOnlyOutput, TFBeamSampleEncoderDecoderOutput, TFBeamSearchDecoderOnlyOutput, TFBeamSearchEncoderDecoderOutput, TFContrastiveSearchDecoderOnlyOutput, TFContrastiveSearchEncoderDecoderOutput, TFGenerationMixin, TFGreedySearchDecoderOnlyOutput, TFGreedySearchEncoderDecoderOutput, TFSampleDecoderOnlyOutput, TFSampleEncoderDecoderOutput, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .flax_logits_process import ( FlaxForcedBOSTokenLogitsProcessor, FlaxForcedEOSTokenLogitsProcessor, FlaxForceTokensLogitsProcessor, FlaxLogitsProcessor, FlaxLogitsProcessorList, FlaxLogitsWarper, FlaxMinLengthLogitsProcessor, FlaxNoRepeatNGramLogitsProcessor, FlaxSuppressTokensAtBeginLogitsProcessor, FlaxSuppressTokensLogitsProcessor, FlaxTemperatureLogitsWarper, FlaxTopKLogitsWarper, FlaxTopPLogitsWarper, FlaxWhisperTimeStampLogitsProcessor, ) from .flax_utils import FlaxBeamSearchOutput, FlaxGenerationMixin, FlaxGreedySearchOutput, FlaxSampleOutput else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
transformers/src/transformers/generation/__init__.py/0
{ "file_path": "transformers/src/transformers/generation/__init__.py", "repo_id": "transformers", "token_count": 5532 }
# coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import json import os import warnings from io import BytesIO from typing import Any, Dict, List, Optional, Tuple, Type, TypeVar, Union import numpy as np import requests from .dynamic_module_utils import custom_object_save from .feature_extraction_utils import BatchFeature as BaseBatchFeature from .utils import ( IMAGE_PROCESSOR_NAME, PushToHubMixin, add_model_info_to_auto_map, add_model_info_to_custom_pipelines, cached_file, copy_func, download_url, is_offline_mode, is_remote_url, is_vision_available, logging, ) if is_vision_available(): from PIL import Image ImageProcessorType = TypeVar("ImageProcessorType", bound="ImageProcessingMixin") logger = logging.get_logger(__name__) # TODO: Move BatchFeature to be imported by both image_processing_utils and image_processing_utils # We override the class string here, but logic is the same. class BatchFeature(BaseBatchFeature): r""" Holds the output of the image processor specific `__call__` methods. This class is derived from a python dictionary and can be used as a dictionary. Args: data (`dict`): Dictionary of lists/arrays/tensors returned by the __call__ method ('pixel_values', etc.). tensor_type (`Union[None, str, TensorType]`, *optional*): You can give a tensor_type here to convert the lists of integers in PyTorch/TensorFlow/Numpy Tensors at initialization. """ # TODO: (Amy) - factor out the common parts of this and the feature extractor class ImageProcessingMixin(PushToHubMixin): """ This is an image processor mixin used to provide saving/loading functionality for sequential and image feature extractors. """ _auto_class = None def __init__(self, **kwargs): """Set elements of `kwargs` as attributes.""" # This key was saved while we still used `XXXFeatureExtractor` for image processing. Now we use # `XXXImageProcessor`, this attribute and its value are misleading. kwargs.pop("feature_extractor_type", None) # Pop "processor_class" as it should be saved as private attribute self._processor_class = kwargs.pop("processor_class", None) # Additional attributes without default values for key, value in kwargs.items(): try: setattr(self, key, value) except AttributeError as err: logger.error(f"Can't set {key} with value {value} for {self}") raise err def _set_processor_class(self, processor_class: str): """Sets processor class as an attribute.""" self._processor_class = processor_class @classmethod def from_pretrained( cls: Type[ImageProcessorType], pretrained_model_name_or_path: Union[str, os.PathLike], cache_dir: Optional[Union[str, os.PathLike]] = None, force_download: bool = False, local_files_only: bool = False, token: Optional[Union[str, bool]] = None, revision: str = "main", **kwargs, ) -> ImageProcessorType: r""" Instantiate a type of [`~image_processing_utils.ImageProcessingMixin`] from an image processor. 
Args: pretrained_model_name_or_path (`str` or `os.PathLike`): This can be either: - a string, the *model id* of a pretrained image_processor hosted inside a model repo on huggingface.co. - a path to a *directory* containing a image processor file saved using the [`~image_processing_utils.ImageProcessingMixin.save_pretrained`] method, e.g., `./my_model_directory/`. - a path or url to a saved image processor JSON *file*, e.g., `./my_model_directory/preprocessor_config.json`. cache_dir (`str` or `os.PathLike`, *optional*): Path to a directory in which a downloaded pretrained model image processor should be cached if the standard cache should not be used. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force to (re-)download the image processor files and override the cached versions if they exist. resume_download: Deprecated and ignored. All downloads are now resumed by default when possible. Will be removed in v5 of Transformers. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. token (`str` or `bool`, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use the token generated when running `huggingface-cli login` (stored in `~/.huggingface`). revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. <Tip> To test a pull request you made on the Hub, you can pass `revision="refs/pr/<pr_number>"`. </Tip> return_unused_kwargs (`bool`, *optional*, defaults to `False`): If `False`, then this function returns just the final image processor object. If `True`, then this functions returns a `Tuple(image_processor, unused_kwargs)` where *unused_kwargs* is a dictionary consisting of the key/value pairs whose keys are not image processor attributes: i.e., the part of `kwargs` which has not been used to update `image_processor` and is otherwise ignored. subfolder (`str`, *optional*, defaults to `""`): In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can specify the folder name here. kwargs (`Dict[str, Any]`, *optional*): The values in kwargs of any keys which are image processor attributes will be used to override the loaded values. Behavior concerning key/value pairs whose keys are *not* image processor attributes is controlled by the `return_unused_kwargs` keyword parameter. Returns: A image processor of type [`~image_processing_utils.ImageProcessingMixin`]. Examples: ```python # We can't instantiate directly the base class *ImageProcessingMixin* so let's show the examples on a # derived class: *CLIPImageProcessor* image_processor = CLIPImageProcessor.from_pretrained( "openai/clip-vit-base-patch32" ) # Download image_processing_config from huggingface.co and cache. image_processor = CLIPImageProcessor.from_pretrained( "./test/saved_model/" ) # E.g. 
image processor (or model) was saved using *save_pretrained('./test/saved_model/')* image_processor = CLIPImageProcessor.from_pretrained("./test/saved_model/preprocessor_config.json") image_processor = CLIPImageProcessor.from_pretrained( "openai/clip-vit-base-patch32", do_normalize=False, foo=False ) assert image_processor.do_normalize is False image_processor, unused_kwargs = CLIPImageProcessor.from_pretrained( "openai/clip-vit-base-patch32", do_normalize=False, foo=False, return_unused_kwargs=True ) assert image_processor.do_normalize is False assert unused_kwargs == {"foo": False} ```""" kwargs["cache_dir"] = cache_dir kwargs["force_download"] = force_download kwargs["local_files_only"] = local_files_only kwargs["revision"] = revision use_auth_token = kwargs.pop("use_auth_token", None) if use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", FutureWarning, ) if token is not None: raise ValueError( "`token` and `use_auth_token` are both specified. Please set only the argument `token`." ) token = use_auth_token if token is not None: kwargs["token"] = token image_processor_dict, kwargs = cls.get_image_processor_dict(pretrained_model_name_or_path, **kwargs) return cls.from_dict(image_processor_dict, **kwargs) def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs): """ Save an image processor object to the directory `save_directory`, so that it can be re-loaded using the [`~image_processing_utils.ImageProcessingMixin.from_pretrained`] class method. Args: save_directory (`str` or `os.PathLike`): Directory where the image processor JSON file will be saved (will be created if it does not exist). push_to_hub (`bool`, *optional*, defaults to `False`): Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the repository you want to push to with `repo_id` (will default to the name of `save_directory` in your namespace). kwargs (`Dict[str, Any]`, *optional*): Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method. """ use_auth_token = kwargs.pop("use_auth_token", None) if use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", FutureWarning, ) if kwargs.get("token", None) is not None: raise ValueError( "`token` and `use_auth_token` are both specified. Please set only the argument `token`." ) kwargs["token"] = use_auth_token if os.path.isfile(save_directory): raise AssertionError(f"Provided path ({save_directory}) should be a directory, not a file") os.makedirs(save_directory, exist_ok=True) if push_to_hub: commit_message = kwargs.pop("commit_message", None) repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1]) repo_id = self._create_repo(repo_id, **kwargs) files_timestamps = self._get_files_timestamps(save_directory) # If we have a custom config, we copy the file defining it in the folder and set the attributes so it can be # loaded from the Hub. 
if self._auto_class is not None: custom_object_save(self, save_directory, config=self) # If we save using the predefined names, we can load using `from_pretrained` output_image_processor_file = os.path.join(save_directory, IMAGE_PROCESSOR_NAME) self.to_json_file(output_image_processor_file) logger.info(f"Image processor saved in {output_image_processor_file}") if push_to_hub: self._upload_modified_files( save_directory, repo_id, files_timestamps, commit_message=commit_message, token=kwargs.get("token"), ) return [output_image_processor_file] @classmethod def get_image_processor_dict( cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs ) -> Tuple[Dict[str, Any], Dict[str, Any]]: """ From a `pretrained_model_name_or_path`, resolve to a dictionary of parameters, to be used for instantiating a image processor of type [`~image_processor_utils.ImageProcessingMixin`] using `from_dict`. Parameters: pretrained_model_name_or_path (`str` or `os.PathLike`): The identifier of the pre-trained checkpoint from which we want the dictionary of parameters. subfolder (`str`, *optional*, defaults to `""`): In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can specify the folder name here. image_processor_filename (`str`, *optional*, defaults to `"config.json"`): The name of the file in the model directory to use for the image processor config. Returns: `Tuple[Dict, Dict]`: The dictionary(ies) that will be used to instantiate the image processor object. """ cache_dir = kwargs.pop("cache_dir", None) force_download = kwargs.pop("force_download", False) resume_download = kwargs.pop("resume_download", None) proxies = kwargs.pop("proxies", None) token = kwargs.pop("token", None) use_auth_token = kwargs.pop("use_auth_token", None) local_files_only = kwargs.pop("local_files_only", False) revision = kwargs.pop("revision", None) subfolder = kwargs.pop("subfolder", "") image_processor_filename = kwargs.pop("image_processor_filename", IMAGE_PROCESSOR_NAME) from_pipeline = kwargs.pop("_from_pipeline", None) from_auto_class = kwargs.pop("_from_auto", False) if use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", FutureWarning, ) if token is not None: raise ValueError( "`token` and `use_auth_token` are both specified. Please set only the argument `token`." 
) token = use_auth_token user_agent = {"file_type": "image processor", "from_auto_class": from_auto_class} if from_pipeline is not None: user_agent["using_pipeline"] = from_pipeline if is_offline_mode() and not local_files_only: logger.info("Offline mode: forcing local_files_only=True") local_files_only = True pretrained_model_name_or_path = str(pretrained_model_name_or_path) is_local = os.path.isdir(pretrained_model_name_or_path) if os.path.isdir(pretrained_model_name_or_path): image_processor_file = os.path.join(pretrained_model_name_or_path, image_processor_filename) if os.path.isfile(pretrained_model_name_or_path): resolved_image_processor_file = pretrained_model_name_or_path is_local = True elif is_remote_url(pretrained_model_name_or_path): image_processor_file = pretrained_model_name_or_path resolved_image_processor_file = download_url(pretrained_model_name_or_path) else: image_processor_file = image_processor_filename try: # Load from local folder or from cache or download from model Hub and cache resolved_image_processor_file = cached_file( pretrained_model_name_or_path, image_processor_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, token=token, user_agent=user_agent, revision=revision, subfolder=subfolder, ) except EnvironmentError: # Raise any environment error raise by `cached_file`. It will have a helpful error message adapted to # the original exception. raise except Exception: # For any other exception, we throw a generic error. raise EnvironmentError( f"Can't load image processor for '{pretrained_model_name_or_path}'. If you were trying to load" " it from 'https://huggingface.co/models', make sure you don't have a local directory with the" f" same name. Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a" f" directory containing a {image_processor_filename} file" ) try: # Load image_processor dict with open(resolved_image_processor_file, "r", encoding="utf-8") as reader: text = reader.read() image_processor_dict = json.loads(text) except json.JSONDecodeError: raise EnvironmentError( f"It looks like the config file at '{resolved_image_processor_file}' is not a valid JSON file." ) if is_local: logger.info(f"loading configuration file {resolved_image_processor_file}") else: logger.info( f"loading configuration file {image_processor_file} from cache at {resolved_image_processor_file}" ) if "auto_map" in image_processor_dict: image_processor_dict["auto_map"] = add_model_info_to_auto_map( image_processor_dict["auto_map"], pretrained_model_name_or_path ) if "custom_pipelines" in image_processor_dict: image_processor_dict["custom_pipelines"] = add_model_info_to_custom_pipelines( image_processor_dict["custom_pipelines"], pretrained_model_name_or_path ) return image_processor_dict, kwargs @classmethod def from_dict(cls, image_processor_dict: Dict[str, Any], **kwargs): """ Instantiates a type of [`~image_processing_utils.ImageProcessingMixin`] from a Python dictionary of parameters. Args: image_processor_dict (`Dict[str, Any]`): Dictionary that will be used to instantiate the image processor object. Such a dictionary can be retrieved from a pretrained checkpoint by leveraging the [`~image_processing_utils.ImageProcessingMixin.to_dict`] method. kwargs (`Dict[str, Any]`): Additional parameters from which to initialize the image processor object. Returns: [`~image_processing_utils.ImageProcessingMixin`]: The image processor object instantiated from those parameters. 
""" image_processor_dict = image_processor_dict.copy() return_unused_kwargs = kwargs.pop("return_unused_kwargs", False) # The `size` parameter is a dict and was previously an int or tuple in feature extractors. # We set `size` here directly to the `image_processor_dict` so that it is converted to the appropriate # dict within the image processor and isn't overwritten if `size` is passed in as a kwarg. if "size" in kwargs and "size" in image_processor_dict: image_processor_dict["size"] = kwargs.pop("size") if "crop_size" in kwargs and "crop_size" in image_processor_dict: image_processor_dict["crop_size"] = kwargs.pop("crop_size") image_processor = cls(**image_processor_dict) # Update image_processor with kwargs if needed to_remove = [] for key, value in kwargs.items(): if hasattr(image_processor, key): setattr(image_processor, key, value) to_remove.append(key) for key in to_remove: kwargs.pop(key, None) logger.info(f"Image processor {image_processor}") if return_unused_kwargs: return image_processor, kwargs else: return image_processor def to_dict(self) -> Dict[str, Any]: """ Serializes this instance to a Python dictionary. Returns: `Dict[str, Any]`: Dictionary of all the attributes that make up this image processor instance. """ output = copy.deepcopy(self.__dict__) output["image_processor_type"] = self.__class__.__name__ return output @classmethod def from_json_file(cls, json_file: Union[str, os.PathLike]): """ Instantiates a image processor of type [`~image_processing_utils.ImageProcessingMixin`] from the path to a JSON file of parameters. Args: json_file (`str` or `os.PathLike`): Path to the JSON file containing the parameters. Returns: A image processor of type [`~image_processing_utils.ImageProcessingMixin`]: The image_processor object instantiated from that JSON file. """ with open(json_file, "r", encoding="utf-8") as reader: text = reader.read() image_processor_dict = json.loads(text) return cls(**image_processor_dict) def to_json_string(self) -> str: """ Serializes this instance to a JSON string. Returns: `str`: String containing all the attributes that make up this feature_extractor instance in JSON format. """ dictionary = self.to_dict() for key, value in dictionary.items(): if isinstance(value, np.ndarray): dictionary[key] = value.tolist() # make sure private name "_processor_class" is correctly # saved as "processor_class" _processor_class = dictionary.pop("_processor_class", None) if _processor_class is not None: dictionary["processor_class"] = _processor_class return json.dumps(dictionary, indent=2, sort_keys=True) + "\n" def to_json_file(self, json_file_path: Union[str, os.PathLike]): """ Save this instance to a JSON file. Args: json_file_path (`str` or `os.PathLike`): Path to the JSON file in which this image_processor instance's parameters will be saved. """ with open(json_file_path, "w", encoding="utf-8") as writer: writer.write(self.to_json_string()) def __repr__(self): return f"{self.__class__.__name__} {self.to_json_string()}" @classmethod def register_for_auto_class(cls, auto_class="AutoImageProcessor"): """ Register this class with a given auto class. This should only be used for custom image processors as the ones in the library are already mapped with `AutoImageProcessor `. <Tip warning={true}> This API is experimental and may have some slight breaking changes in the next releases. </Tip> Args: auto_class (`str` or `type`, *optional*, defaults to `"AutoImageProcessor "`): The auto class to register this new image processor with. 
""" if not isinstance(auto_class, str): auto_class = auto_class.__name__ import transformers.models.auto as auto_module if not hasattr(auto_module, auto_class): raise ValueError(f"{auto_class} is not a valid auto class.") cls._auto_class = auto_class def fetch_images(self, image_url_or_urls: Union[str, List[str]]): """ Convert a single or a list of urls into the corresponding `PIL.Image` objects. If a single url is passed, the return value will be a single object. If a list is passed a list of objects is returned. """ headers = { "User-Agent": ( "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0" " Safari/537.36" ) } if isinstance(image_url_or_urls, list): return [self.fetch_images(x) for x in image_url_or_urls] elif isinstance(image_url_or_urls, str): response = requests.get(image_url_or_urls, stream=True, headers=headers) response.raise_for_status() return Image.open(BytesIO(response.content)) else: raise TypeError(f"only a single or a list of entries is supported but got type={type(image_url_or_urls)}") ImageProcessingMixin.push_to_hub = copy_func(ImageProcessingMixin.push_to_hub) if ImageProcessingMixin.push_to_hub.__doc__ is not None: ImageProcessingMixin.push_to_hub.__doc__ = ImageProcessingMixin.push_to_hub.__doc__.format( object="image processor", object_class="AutoImageProcessor", object_files="image processor file" )
transformers/src/transformers/image_processing_base.py/0
{ "file_path": "transformers/src/transformers/image_processing_base.py", "repo_id": "transformers", "token_count": 10635 }
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations from typing import TYPE_CHECKING from ..utils import is_torch_available if TYPE_CHECKING: from torch import nn def is_fsdp_managed_module(module: nn.Module) -> bool: if not is_torch_available(): return False import torch if not torch.distributed.is_available(): return False import torch.distributed.fsdp return isinstance(module, torch.distributed.fsdp.FullyShardedDataParallel) or getattr( module, "_is_fsdp_managed_module", False )
transformers/src/transformers/integrations/fsdp.py/0
{ "file_path": "transformers/src/transformers/integrations/fsdp.py", "repo_id": "transformers", "token_count": 345 }
/*! ************************************************************************************************** * Deformable DETR * Copyright (c) 2020 SenseTime. All Rights Reserved. * Licensed under the Apache License, Version 2.0 [see LICENSE for details] ************************************************************************************************** * Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 ************************************************************************************************** */ #include <vector> #include "cuda/ms_deform_im2col_cuda.cuh" #include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <cuda.h> #include <cuda_runtime.h> #pragma once #include <torch/extension.h> at::Tensor ms_deform_attn_cuda_forward( const at::Tensor &value, const at::Tensor &spatial_shapes, const at::Tensor &level_start_index, const at::Tensor &sampling_loc, const at::Tensor &attn_weight, const int im2col_step) { at::DeviceGuard guard(value.device()); AT_ASSERTM(value.is_contiguous(), "value tensor has to be contiguous"); AT_ASSERTM(spatial_shapes.is_contiguous(), "spatial_shapes tensor has to be contiguous"); AT_ASSERTM(level_start_index.is_contiguous(), "level_start_index tensor has to be contiguous"); AT_ASSERTM(sampling_loc.is_contiguous(), "sampling_loc tensor has to be contiguous"); AT_ASSERTM(attn_weight.is_contiguous(), "attn_weight tensor has to be contiguous"); AT_ASSERTM(value.is_cuda(), "value must be a CUDA tensor"); AT_ASSERTM(spatial_shapes.is_cuda(), "spatial_shapes must be a CUDA tensor"); AT_ASSERTM(level_start_index.is_cuda(), "level_start_index must be a CUDA tensor"); AT_ASSERTM(sampling_loc.is_cuda(), "sampling_loc must be a CUDA tensor"); AT_ASSERTM(attn_weight.is_cuda(), "attn_weight must be a CUDA tensor"); const int batch = value.size(0); const int spatial_size = value.size(1); const int num_heads = value.size(2); const int channels = value.size(3); const int num_levels = spatial_shapes.size(0); const int num_query = sampling_loc.size(1); const int num_point = sampling_loc.size(4); const int im2col_step_ = std::min(batch, im2col_step); AT_ASSERTM(batch % im2col_step_ == 0, "batch(%d) must divide im2col_step(%d)", batch, im2col_step_); auto output = at::zeros({batch, num_query, num_heads, channels}, value.options()); const int batch_n = im2col_step_; auto output_n = output.view({batch/im2col_step_, batch_n, num_query, num_heads, channels}); auto per_value_size = spatial_size * num_heads * channels; auto per_sample_loc_size = num_query * num_heads * num_levels * num_point * 2; auto per_attn_weight_size = num_query * num_heads * num_levels * num_point; for (int n = 0; n < batch/im2col_step_; ++n) { auto columns = output_n.select(0, n); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, value.scalar_type(), "ms_deform_attn_forward_cuda", ([&] { ms_deformable_im2col_cuda(at::cuda::getCurrentCUDAStream(), value.data_ptr<scalar_t>() + n * im2col_step_ * per_value_size, spatial_shapes.data_ptr<int64_t>(), level_start_index.data_ptr<int64_t>(), sampling_loc.data_ptr<scalar_t>() + n * im2col_step_ * per_sample_loc_size, attn_weight.data_ptr<scalar_t>() + n * im2col_step_ * per_attn_weight_size, batch_n, spatial_size, num_heads, channels, num_levels, num_query, num_point, columns.data_ptr<scalar_t>()); })); } output = output.view({batch, num_query, num_heads*channels}); return output; } std::vector<at::Tensor> ms_deform_attn_cuda_backward( const at::Tensor &value, const at::Tensor &spatial_shapes, 
const at::Tensor &level_start_index, const at::Tensor &sampling_loc, const at::Tensor &attn_weight, const at::Tensor &grad_output, const int im2col_step) { at::DeviceGuard guard(value.device()); AT_ASSERTM(value.is_contiguous(), "value tensor has to be contiguous"); AT_ASSERTM(spatial_shapes.is_contiguous(), "spatial_shapes tensor has to be contiguous"); AT_ASSERTM(level_start_index.is_contiguous(), "level_start_index tensor has to be contiguous"); AT_ASSERTM(sampling_loc.is_contiguous(), "sampling_loc tensor has to be contiguous"); AT_ASSERTM(attn_weight.is_contiguous(), "attn_weight tensor has to be contiguous"); AT_ASSERTM(grad_output.is_contiguous(), "grad_output tensor has to be contiguous"); AT_ASSERTM(value.is_cuda(), "value must be a CUDA tensor"); AT_ASSERTM(spatial_shapes.is_cuda(), "spatial_shapes must be a CUDA tensor"); AT_ASSERTM(level_start_index.is_cuda(), "level_start_index must be a CUDA tensor"); AT_ASSERTM(sampling_loc.is_cuda(), "sampling_loc must be a CUDA tensor"); AT_ASSERTM(attn_weight.is_cuda(), "attn_weight must be a CUDA tensor"); AT_ASSERTM(grad_output.is_cuda(), "grad_output must be a CUDA tensor"); const int batch = value.size(0); const int spatial_size = value.size(1); const int num_heads = value.size(2); const int channels = value.size(3); const int num_levels = spatial_shapes.size(0); const int num_query = sampling_loc.size(1); const int num_point = sampling_loc.size(4); const int im2col_step_ = std::min(batch, im2col_step); AT_ASSERTM(batch % im2col_step_ == 0, "batch(%d) must divide im2col_step(%d)", batch, im2col_step_); auto grad_value = at::zeros_like(value); auto grad_sampling_loc = at::zeros_like(sampling_loc); auto grad_attn_weight = at::zeros_like(attn_weight); const int batch_n = im2col_step_; auto per_value_size = spatial_size * num_heads * channels; auto per_sample_loc_size = num_query * num_heads * num_levels * num_point * 2; auto per_attn_weight_size = num_query * num_heads * num_levels * num_point; auto grad_output_n = grad_output.view({batch/im2col_step_, batch_n, num_query, num_heads, channels}); for (int n = 0; n < batch/im2col_step_; ++n) { auto grad_output_g = grad_output_n.select(0, n); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, value.scalar_type(), "ms_deform_attn_backward_cuda", ([&] { ms_deformable_col2im_cuda(at::cuda::getCurrentCUDAStream(), grad_output_g.data_ptr<scalar_t>(), value.data_ptr<scalar_t>() + n * im2col_step_ * per_value_size, spatial_shapes.data_ptr<int64_t>(), level_start_index.data_ptr<int64_t>(), sampling_loc.data_ptr<scalar_t>() + n * im2col_step_ * per_sample_loc_size, attn_weight.data_ptr<scalar_t>() + n * im2col_step_ * per_attn_weight_size, batch_n, spatial_size, num_heads, channels, num_levels, num_query, num_point, grad_value.data_ptr<scalar_t>() + n * im2col_step_ * per_value_size, grad_sampling_loc.data_ptr<scalar_t>() + n * im2col_step_ * per_sample_loc_size, grad_attn_weight.data_ptr<scalar_t>() + n * im2col_step_ * per_attn_weight_size); })); } return { grad_value, grad_sampling_loc, grad_attn_weight }; }
transformers/src/transformers/kernels/deformable_detr/cuda/ms_deform_attn_cuda.cu/0
{ "file_path": "transformers/src/transformers/kernels/deformable_detr/cuda/ms_deform_attn_cuda.cu", "repo_id": "transformers", "token_count": 3260 }
#include "cuda_kernel.h" ////////////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void index_max_cuda_kernel( float *index_vals, // [batch_size, 32, num_block] int *indices, // [batch_size, num_block] float *max_vals, // [batch_size, A_num_block * 32] float *max_vals_scatter, // [batch_size, 32, num_block] long batch_size, long A_num_block, long B_num_block, long num_block ) { long batch_idx = blockIdx.x; long thread_idx = threadIdx.x; long num_thread = blockDim.x; extern __shared__ float buffer[]; int *max_buffer = (int*)buffer; for (int i = 0; i < A_num_block * 32; i = i + num_thread) { int idx = i + thread_idx; if (idx < A_num_block * 32) { max_buffer[idx] = -1e8; } } __syncthreads(); int *indices_pt = &indices[batch_idx * num_block]; float *index_vals_pt = &index_vals[batch_idx * num_block * 32]; for (int idx_start = 0; idx_start < 32 * num_block; idx_start = idx_start + num_thread) { int idx = idx_start + thread_idx; int A_block_idx = indices_pt[idx % num_block] / B_num_block; atomicMax(&max_buffer[A_block_idx * 32 + idx / num_block], (int)(index_vals_pt[idx] * 1000)); } __syncthreads(); float *max_vals_pt = &max_vals[batch_idx * A_num_block * 32]; for (int i = 0; i < A_num_block * 32; i = i + num_thread) { int idx = i + thread_idx; if (idx < A_num_block * 32) { max_vals_pt[idx] = (float)max_buffer[idx] / 1000.; } } float *max_vals_scatter_pt = &max_vals_scatter[batch_idx * num_block * 32]; for (int idx_start = 0; idx_start < 32 * num_block; idx_start = idx_start + num_thread) { int idx = idx_start + thread_idx; int A_block_idx = indices_pt[idx % num_block] / B_num_block; max_vals_scatter_pt[idx] = (float)max_buffer[A_block_idx * 32 + idx / num_block] / 1000.; } } __global__ void mm_to_sparse_cuda_kernel( float *dense_A, // [batch_size, A_num_block, dim, 32] float *dense_B, // [batch_size, B_num_block, dim, 32] int *indices, // [batch_size, num_block] float *sparse_C, // [batch_size, num_block, 32, 32] long batch_size, long A_num_block, long B_num_block, long dim, long num_block ) { long batch_idx = blockIdx.y; long block_idx = blockIdx.x * blockDim.y + threadIdx.y; long thread_idx = threadIdx.x; __shared__ float buffer[4096]; float *A_buffer = &buffer[threadIdx.y * 1024]; // [2, 8, 32] float *B_buffer = &buffer[threadIdx.y * 1024 + 512]; // [2, 8, 32] long batch_idx__block_idx = batch_idx * num_block + block_idx; long AB_block_idx = indices[batch_idx__block_idx]; float *dense_A_pt = &dense_A[(batch_idx * A_num_block + AB_block_idx / B_num_block) * dim * 32]; float *dense_B_pt = &dense_B[(batch_idx * B_num_block + AB_block_idx % B_num_block) * dim * 32]; int reg_1_idx = thread_idx / 8; // [0000000011111111222222223333333344444444555555556666666677777777] int reg_2_idx = thread_idx % 8; // [0123456701234567012345670123456701234567012345670123456701234567] float reg_1[8]; float reg_2[8]; float reg_array[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; #pragma unroll for (int i = 0; i < 4; i++) { A_buffer[i * 64 + thread_idx] = dense_A_pt[i * 64 + thread_idx]; B_buffer[i * 64 + thread_idx] = dense_B_pt[i * 64 + thread_idx]; } __syncthreads(); #pragma unroll for (int i = 0; i < 4; i++) { reg_1[i] = A_buffer[reg_1_idx * 4 + i]; reg_2[i] = B_buffer[reg_2_idx * 4 + i]; } for (int dim_stride = 1; dim_stride < (dim / 8); dim_stride++) { #pragma unroll for (int i = 0; i < 4; i++) { A_buffer[(dim_stride % 2) * 256 + i * 64 + thread_idx] = 
dense_A_pt[dim_stride * 256 + i * 64 + thread_idx]; B_buffer[(dim_stride % 2) * 256 + i * 64 + thread_idx] = dense_B_pt[dim_stride * 256 + i * 64 + thread_idx]; } #pragma unroll for (int mini_dim_idx = 1; mini_dim_idx < 8; mini_dim_idx++) { #pragma unroll for (int i = 0; i < 4; i++) { reg_1[(mini_dim_idx % 2) * 4 + i] = A_buffer[((dim_stride - 1) % 2) * 256 + mini_dim_idx * 32 + reg_1_idx * 4 + i]; reg_2[(mini_dim_idx % 2) * 4 + i] = B_buffer[((dim_stride - 1) % 2) * 256 + mini_dim_idx * 32 + reg_2_idx * 4 + i]; } #pragma unroll for (int i = 0; i < 4; i++) { #pragma unroll for (int j = 0; j < 4; j++) { reg_array[i * 4 + j] += reg_1[((mini_dim_idx - 1) % 2) * 4 + i] * reg_2[((mini_dim_idx - 1) % 2) * 4 + j]; } } } __syncthreads(); #pragma unroll for (int i = 0; i < 4; i++) { reg_1[i] = A_buffer[(dim_stride % 2) * 256 + reg_1_idx * 4 + i]; reg_2[i] = B_buffer[(dim_stride % 2) * 256 + reg_2_idx * 4 + i]; } #pragma unroll for (int i = 0; i < 4; i++) { #pragma unroll for (int j = 0; j < 4; j++) { reg_array[i * 4 + j] += reg_1[4 + i] * reg_2[4 + j]; } } } #pragma unroll for (int mini_dim_idx = 1; mini_dim_idx < 8; mini_dim_idx++) { #pragma unroll for (int i = 0; i < 4; i++) { reg_1[(mini_dim_idx % 2) * 4 + i] = A_buffer[256 + mini_dim_idx * 32 + reg_1_idx * 4 + i]; reg_2[(mini_dim_idx % 2) * 4 + i] = B_buffer[256 + mini_dim_idx * 32 + reg_2_idx * 4 + i]; } #pragma unroll for (int i = 0; i < 4; i++) { #pragma unroll for (int j = 0; j < 4; j++) { reg_array[i * 4 + j] += reg_1[((mini_dim_idx - 1) % 2) * 4 + i] * reg_2[((mini_dim_idx - 1) % 2) * 4 + j]; } } } #pragma unroll for (int i = 0; i < 4; i++) { #pragma unroll for (int j = 0; j < 4; j++) { reg_array[i * 4 + j] += reg_1[4 + i] * reg_2[4 + j]; } } __syncthreads(); float *C_buffer = &buffer[threadIdx.y * 1024]; // [32, 32] #pragma unroll for (int i = 0; i < 4; i++) { #pragma unroll for (int j = 0; j < 4; j++) { C_buffer[(reg_2_idx * 4 + j) * 32 + reg_1_idx * 4 + i] = reg_array[i * 4 + j]; } } __syncthreads(); float *sparse_C_pt = &sparse_C[batch_idx__block_idx * 1024]; #pragma unroll for (int i = 0; i < 16; i++) { sparse_C_pt[i * 64 + thread_idx] = C_buffer[i * 64 + thread_idx]; } } __global__ void sparse_dense_mm_cuda_kernel( float *sparse_A, // [batch_size, num_block, 32, 32] int *indices, // [batch_size, num_block] float *dense_B, // [batch_size, B_num_block, dim, 32] float *dense_C, // [batch_size, A_num_block, dim, 32] long batch_size, long A_num_block, long B_num_block, long dim, long num_block ) { long batch_idx = blockIdx.y; long block_idx = blockIdx.x * blockDim.y + threadIdx.y; long thread_idx = threadIdx.x; __shared__ float buffer[6144]; float *A_buffer = &buffer[threadIdx.y * 3072]; // [32, 32] float *B_buffer = &buffer[threadIdx.y * 3072 + 1024]; // [32, 64] long batch_idx__block_idx = batch_idx * num_block + block_idx; float *sparse_A_pt = &sparse_A[batch_idx__block_idx * 1024]; #pragma unroll for (int i = 0; i < 8; i++) { A_buffer[i * 128 + thread_idx] = sparse_A_pt[i * 128 + thread_idx]; } long AB_block_idx = indices[batch_idx__block_idx]; float *dense_B_pt = &dense_B[(batch_idx * B_num_block + AB_block_idx % B_num_block) * 32 * dim]; float *dense_C_pt = &dense_C[(batch_idx * A_num_block + AB_block_idx / B_num_block) * 32 * dim]; // [0000000011111111222222223333333344444444555555556666666677777777] // [0123456701234567012345670123456701234567012345670123456701234567] int reg_1_idx = thread_idx / 8; int reg_2_idx = thread_idx % 8; float reg_1[8]; float reg_2[8]; float reg_array[16]; for (int dim_stride = 0; dim_stride < dim; 
dim_stride = dim_stride + 64) { #pragma unroll for (int i = 0; i < 16; i++) { B_buffer[i * 128 + thread_idx] = dense_B_pt[dim_stride * 32 + i * 128 + thread_idx]; } #pragma unroll for (int i = 0; i < 16; i++) { reg_array[i] = 0; } __syncthreads(); #pragma unroll for (int i = 0; i < 4; i++) { reg_1[i] = B_buffer[(reg_1_idx * 4 + i) * 32]; reg_2[i] = A_buffer[reg_2_idx * 4 + i]; } #pragma unroll for (int mini_dim_idx = 1; mini_dim_idx < 32; mini_dim_idx++) { #pragma unroll for (int i = 0; i < 4; i++) { reg_1[(mini_dim_idx % 2) * 4 + i] = B_buffer[(reg_1_idx * 4 + i) * 32 + mini_dim_idx]; reg_2[(mini_dim_idx % 2) * 4 + i] = A_buffer[mini_dim_idx * 32 + reg_2_idx * 4 + i]; } #pragma unroll for (int i = 0; i < 4; i++) { #pragma unroll for (int j = 0; j < 4; j++) { reg_array[i * 4 + j] += reg_1[((mini_dim_idx - 1) % 2) * 4 + i] * reg_2[((mini_dim_idx - 1) % 2) * 4 + j]; } } } #pragma unroll for (int i = 0; i < 4; i++) { #pragma unroll for (int j = 0; j < 4; j++) { reg_array[i * 4 + j] += reg_1[4 + i] * reg_2[4 + j]; } } __syncthreads(); float *C_buffer = &buffer[threadIdx.y * 3072 + 1024]; // [64, 32] #pragma unroll for (int i = 0; i < 4; i++) { #pragma unroll for (int j = 0; j < 4; j++) { C_buffer[(reg_1_idx * 4 + i) * 32 + reg_2_idx * 4 + j] = reg_array[i * 4 + j]; } } __syncthreads(); #pragma unroll for (int i = 0; i < 16; i++) { atomicAdd(&dense_C_pt[dim_stride * 32 + i * 128 + thread_idx], C_buffer[i * 128 + thread_idx]); } __syncthreads(); } } __global__ void reduce_sum_cuda_kernel( float *sparse_A, // [batch_size, num_block, 32, 32] int *indices, // [batch_size, num_block] float *dense_C, // [batch_size, A_num_block, 32] long batch_size, long A_num_block, long B_num_block, long num_block ) { long batch_idx = blockIdx.y; long block_idx = blockIdx.x * blockDim.y + threadIdx.y; long thread_idx = threadIdx.x; long batch_idx__block_idx = batch_idx * num_block + block_idx; long AB_block_idx = indices[batch_idx__block_idx]; float *sparse_A_pt = &sparse_A[batch_idx__block_idx * 1024]; float reg_array[16]; float value = 0; #pragma unroll for (int i = 0; i < 8; i++) { reg_array[i] = sparse_A_pt[i * 32 + thread_idx]; } #pragma unroll for (int stride = 8; stride < 32; stride = stride + 8) { #pragma unroll for (int i = 0; i < 8; i++) { reg_array[(stride + i) % 16] = sparse_A_pt[(stride + i) * 32 + thread_idx]; } #pragma unroll for (int i = 0; i < 8; i++) { value = value + reg_array[(stride - 8 + i) % 16]; } } #pragma unroll for (int i = 0; i < 8; i++) { value = value + reg_array[8 + i]; } float *dense_C_pt = &dense_C[(batch_idx * A_num_block + AB_block_idx / B_num_block) * 32]; atomicAdd(&dense_C_pt[thread_idx], value); } __global__ void scatter_cuda_kernel( float *dense_A, // [batch_size, A_num_block, 32] int *indices, // [batch_size, num_block] float *sparse_C, // [batch_size, num_block, 32, 32] long batch_size, long A_num_block, long B_num_block, long num_block ) { long batch_idx = blockIdx.y; long block_idx = blockIdx.x * blockDim.y + threadIdx.y; long thread_idx = threadIdx.x; long batch_idx__block_idx = batch_idx * num_block + block_idx; long AB_block_idx = indices[batch_idx__block_idx]; float *dense_A_pt = &dense_A[(batch_idx * A_num_block + AB_block_idx / B_num_block) * 32]; float *sparse_C_pt = &sparse_C[(batch_idx * num_block + block_idx) * 1024]; float value = dense_A_pt[thread_idx]; #pragma unroll for (int i = 0; i < 32; i++) { sparse_C_pt[i * 32 + thread_idx] = value; } }
transformers/src/transformers/kernels/mra/cuda_kernel.cu/0
{ "file_path": "transformers/src/transformers/kernels/mra/cuda_kernel.cu", "repo_id": "transformers", "token_count": 5563 }
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """TF general model utils.""" from __future__ import annotations import functools import gc import inspect import json import os import pickle import re import warnings from collections.abc import Mapping from pathlib import Path from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Union import h5py import numpy as np import tensorflow as tf from packaging.version import parse from . import DataCollatorWithPadding, DefaultDataCollator from .activations_tf import get_tf_activation from .configuration_utils import PretrainedConfig from .dynamic_module_utils import custom_object_save from .generation import GenerationConfig, TFGenerationMixin from .tf_utils import ( convert_batch_encoding, expand_1d, load_attributes_from_hdf5_group, save_attributes_to_hdf5_group, shape_list, ) from .utils import ( SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, TF2_WEIGHTS_INDEX_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ModelOutput, PushToHubMixin, cached_file, download_url, find_labels, has_file, is_offline_mode, is_remote_url, is_safetensors_available, is_tf_symbolic_tensor, logging, requires_backends, working_or_temp_dir, ) from .utils.hub import convert_file_size_to_int, get_checkpoint_shard_files if is_safetensors_available(): from safetensors import safe_open from safetensors.tensorflow import save_file as safe_save_file if TYPE_CHECKING: from . import PreTrainedTokenizerBase logger = logging.get_logger(__name__) if "TF_USE_LEGACY_KERAS" not in os.environ: os.environ["TF_USE_LEGACY_KERAS"] = "1" # Compatibility fix to make sure tf.keras stays at Keras 2 elif os.environ["TF_USE_LEGACY_KERAS"] != "1": logger.warning( "Transformers is only compatible with Keras 2, but you have explicitly set `TF_USE_LEGACY_KERAS` to `0`. " "This may result in unexpected behaviour or errors if Keras 3 objects are passed to Transformers models." ) try: import tf_keras as keras from tf_keras import backend as K except (ModuleNotFoundError, ImportError): import keras from keras import backend as K if parse(keras.__version__).major > 2: raise ValueError( "Your currently installed version of Keras is Keras 3, but this is not yet supported in " "Transformers. Please install the backwards-compatible tf-keras package with " "`pip install tf-keras`." ) tf_logger = tf.get_logger() TFModelInputType = Union[ List[tf.Tensor], List[np.ndarray], Dict[str, tf.Tensor], Dict[str, np.ndarray], tf.Tensor, np.ndarray, ] def dummy_loss(y_true, y_pred): if y_pred.shape.rank <= 1: return y_pred else: reduction_axes = list(range(1, y_pred.shape.rank)) return tf.reduce_mean(y_pred, axis=reduction_axes) class TFModelUtilsMixin: """ A few utilities for `keras.Model`, to be used as a mixin. 
""" def num_parameters(self, only_trainable: bool = False) -> int: """ Get the number of (optionally, trainable) parameters in the model. Args: only_trainable (`bool`, *optional*, defaults to `False`): Whether or not to return only the number of trainable parameters Returns: `int`: The number of parameters. """ if only_trainable: return int(sum(np.prod(w.shape.as_list()) for w in self.trainable_variables)) else: return self.count_params() def keras_serializable(cls): """ Decorate a Keras Layer class to support Keras serialization. This is done by: 1. Adding a `transformers_config` dict to the Keras config dictionary in `get_config` (called by Keras at serialization time. 2. Wrapping `__init__` to accept that `transformers_config` dict (passed by Keras at deserialization time) and convert it to a config object for the actual layer initializer. 3. Registering the class as a custom object in Keras (if the Tensorflow version supports this), so that it does not need to be supplied in `custom_objects` in the call to `keras.models.load_model`. Args: cls (a `keras.layers.Layers subclass`): Typically a `TF.MainLayer` class in this project, in general must accept a `config` argument to its initializer. Returns: The same class object, with modifications for Keras deserialization. """ initializer = cls.__init__ config_class = getattr(cls, "config_class", None) if config_class is None: raise AttributeError("Must set `config_class` to use @keras_serializable") @functools.wraps(initializer) def wrapped_init(self, *args, **kwargs): config = args[0] if args and isinstance(args[0], PretrainedConfig) else kwargs.pop("config", None) if isinstance(config, dict): config = config_class.from_dict(config) initializer(self, config, *args, **kwargs) elif isinstance(config, PretrainedConfig): if len(args) > 0: initializer(self, *args, **kwargs) else: initializer(self, config, *args, **kwargs) else: raise ValueError("Must pass either `config` (PretrainedConfig) or `config` (dict)") self._config = config self._kwargs = kwargs cls.__init__ = wrapped_init if not hasattr(cls, "get_config"): raise TypeError("Only use @keras_serializable on keras.layers.Layer subclasses") if hasattr(cls.get_config, "_is_default"): def get_config(self): cfg = super(cls, self).get_config() cfg["config"] = self._config.to_dict() cfg.update(self._kwargs) return cfg cls.get_config = get_config cls._keras_serializable = True if hasattr(keras.utils, "register_keras_serializable"): cls = keras.utils.register_keras_serializable()(cls) return cls class TFCausalLanguageModelingLoss: """ Loss function suitable for causal language modeling (CLM), that is, the task of guessing the next token. <Tip> Any label of -100 will be ignored (along with the corresponding logits) in the loss computation. 
</Tip> """ def hf_compute_loss(self, labels, logits): loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=keras.losses.Reduction.NONE) if self.config.tf_legacy_loss: # make sure only labels that are not equal to -100 affect the loss active_loss = tf.not_equal(tf.reshape(labels, (-1,)), -100) reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, shape_list(logits)[2])), active_loss) labels = tf.boolean_mask(tf.reshape(labels, (-1,)), active_loss) return loss_fn(labels, reduced_logits) # Clip negative labels to zero here to avoid NaNs and errors - those positions will get masked later anyway unmasked_loss = loss_fn(tf.nn.relu(labels), logits) # make sure only labels that are not equal to -100 affect the loss loss_mask = tf.cast(labels != -100, dtype=unmasked_loss.dtype) masked_loss = unmasked_loss * loss_mask reduced_masked_loss = tf.reduce_sum(masked_loss) / tf.reduce_sum(loss_mask) return tf.reshape(reduced_masked_loss, (1,)) class TFQuestionAnsweringLoss: """ Loss function suitable for question answering. """ def hf_compute_loss(self, labels, logits): loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=keras.losses.Reduction.NONE) start_loss = loss_fn(labels["start_position"], logits[0]) end_loss = loss_fn(labels["end_position"], logits[1]) return (start_loss + end_loss) / 2.0 class TFTokenClassificationLoss: """ Loss function suitable for token classification. <Tip> Any label of -100 will be ignored (along with the corresponding logits) in the loss computation. </Tip> """ def hf_compute_loss(self, labels, logits): loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=keras.losses.Reduction.NONE) if tf.executing_eagerly(): # Data-dependent conditionals are forbidden in XLA if tf.math.reduce_any(labels == -1): tf.print("Using `-1` to mask the loss for the token is deprecated. Please use `-100` instead.") if self.config.tf_legacy_loss: # make sure only labels that are not equal to -100 # are taken into account as loss if tf.math.reduce_any(labels == -1): tf.print("Using `-1` to mask the loss for the token is deprecated. Please use `-100` instead.") active_loss = tf.reshape(labels, (-1,)) != -1 else: active_loss = tf.reshape(labels, (-1,)) != -100 reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, shape_list(logits)[2])), active_loss) labels = tf.boolean_mask(tf.reshape(labels, (-1,)), active_loss) return loss_fn(labels, reduced_logits) # Clip negative labels to zero here to avoid NaNs and errors - those positions will get masked later anyway unmasked_loss = loss_fn(tf.nn.relu(labels), logits) # make sure only labels that are not equal to -100 or -1 # are taken into account as loss loss_mask = tf.cast(labels >= 0, dtype=unmasked_loss.dtype) # Avoid possible division by zero later # Masked positions will have a loss of NaN because -100 and -1 are not valid labels masked_loss = unmasked_loss * loss_mask reduced_masked_loss = tf.reduce_sum(masked_loss) / tf.reduce_sum(loss_mask) return tf.reshape(reduced_masked_loss, (1,)) class TFSequenceClassificationLoss: """ Loss function suitable for sequence classification. 
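    When the model produces a single logit per example (for instance when `config.num_labels == 1`), the task is
    treated as regression and a mean-squared-error loss is used; otherwise a sparse categorical cross-entropy over
    the integer class labels is applied.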
""" def hf_compute_loss(self, labels, logits): if logits.shape.rank == 1 or logits.shape[1] == 1: loss_fn = keras.losses.MeanSquaredError(reduction=keras.losses.Reduction.NONE) if labels.shape.rank == 1: # MeanSquaredError returns a scalar loss if the labels are 1D, so avoid that labels = tf.expand_dims(labels, axis=-1) else: loss_fn = keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction=keras.losses.Reduction.NONE ) return loss_fn(labels, logits) class TFMultipleChoiceLoss: """Loss function suitable for multiple choice tasks.""" def hf_compute_loss(self, labels, logits): loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=keras.losses.Reduction.NONE) return loss_fn(labels, logits) class TFMaskedLanguageModelingLoss(TFCausalLanguageModelingLoss): """ Loss function suitable for masked language modeling (MLM), that is, the task of guessing the masked tokens. <Tip> Any label of -100 will be ignored (along with the corresponding logits) in the loss computation. </Tip> """ class TFNextSentencePredictionLoss: """ Loss function suitable for next sentence prediction (NSP), that is, the task of guessing the next sentence. <Tip> Any label of -100 will be ignored (along with the corresponding logits) in the loss computation. </Tip> """ def hf_compute_loss(self, labels, logits): loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=keras.losses.Reduction.NONE) if self.config.tf_legacy_loss: # make sure only labels that are not equal to -100 # are taken into account as loss next_sentence_active_loss = tf.not_equal(tf.reshape(labels, (-1,)), -100) next_sentence_reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, 2)), next_sentence_active_loss) next_sentence_label = tf.boolean_mask(tf.reshape(labels, (-1,)), next_sentence_active_loss) return loss_fn(next_sentence_label, next_sentence_reduced_logits) # make sure only labels that are not equal to -100 # are taken into account as loss # Clip negative labels to zero here to avoid NaNs and errors - those positions will get masked later anyway unmasked_ns_loss = loss_fn(y_true=tf.nn.relu(labels), y_pred=logits) ns_loss_mask = tf.cast(labels != -100, dtype=unmasked_ns_loss.dtype) # Just zero out samples where label is -100, no reduction masked_ns_loss = unmasked_ns_loss * ns_loss_mask return masked_ns_loss def booleans_processing(config, **kwargs): """ Process the input booleans of each model. Args: config ([`PretrainedConfig`]): The config of the running model. **kwargs: The boolean parameters Returns: A dictionary with the proper values for each boolean """ final_booleans = {} # Pure conv models (such as ConvNext) do not have `output_attentions`. 
If the signature has # `output_attentions`, it will be present here in `kwargs`, even if unset (in that case, as `None`) if "output_attentions" in kwargs: final_booleans["output_attentions"] = ( kwargs["output_attentions"] if kwargs["output_attentions"] is not None else config.output_attentions ) final_booleans["output_hidden_states"] = ( kwargs["output_hidden_states"] if kwargs["output_hidden_states"] is not None else config.output_hidden_states ) final_booleans["return_dict"] = kwargs["return_dict"] if kwargs["return_dict"] is not None else config.return_dict if "use_cache" in kwargs: final_booleans["use_cache"] = ( kwargs["use_cache"] if kwargs["use_cache"] is not None else getattr(config, "use_cache", None) ) return final_booleans def unpack_inputs(func): """ Decorator that processes the inputs to a Keras layer, passing them to the layer as keyword arguments. This enables downstream use of the inputs by their variable name, even if they arrive packed as a dictionary in the first input (common case in Keras). Args: func (`callable`): The callable function of the TensorFlow model. Returns: A callable that wraps the original `func` with the behavior described above. """ original_signature = inspect.signature(func) @functools.wraps(func) def run_call_with_unpacked_inputs(self, *args, **kwargs): # isolates the actual `**kwargs` for the decorated function kwargs_call = {key: val for key, val in kwargs.items() if key not in dict(original_signature.parameters)} fn_args_and_kwargs = {key: val for key, val in kwargs.items() if key not in kwargs_call} fn_args_and_kwargs.update({"kwargs_call": kwargs_call}) # move any arg into kwargs, if they exist fn_args_and_kwargs.update(dict(zip(func.__code__.co_varnames[1:], args))) # Encoder Decoder models delegate the application of the configuration options to their inner models. if "EncoderDecoder" in self.__class__.__name__: config = None else: config = self.config unpacked_inputs = input_processing(func, config, **fn_args_and_kwargs) return func(self, **unpacked_inputs) # Keras enforces the first layer argument to be passed, and checks it through `inspect.getfullargspec()`. This # function does not follow wrapper chains (i.e. ignores `functools.wraps()`), meaning that without the line below # Keras would attempt to check the first argument against the literal signature of the wrapper. run_call_with_unpacked_inputs.__signature__ = original_signature return run_call_with_unpacked_inputs def input_processing(func, config, **kwargs): """ Process the input of each TensorFlow model including the booleans. In case of a list of symbolic inputs, each input has to be named accordingly to the parameters name, i.e. `input_ids = keras.Input(shape=(128,), dtype='int32', name="input_ids")` otherwise the order of the tensors will not be guaranteed during the training. Args: func (`callable`): The callable function of the TensorFlow model. config ([`PretrainedConfig`]): The config of the running model. **kwargs: The inputs of the model. Returns: Two lists, one for the missing layers, and another one for the unexpected layers. 
""" signature = dict(inspect.signature(func).parameters) has_kwargs = bool(signature.pop("kwargs", None)) signature.pop("self", None) parameter_names = list(signature.keys()) main_input_name = parameter_names[0] main_input = kwargs.pop(main_input_name, None) output = {} allowed_types = (tf.Tensor, bool, int, ModelOutput, tuple, list, dict, np.ndarray) if "inputs" in kwargs["kwargs_call"]: warnings.warn( "The `inputs` argument is deprecated and will be removed in a future version, use `input_ids` instead.", FutureWarning, ) output["input_ids"] = kwargs["kwargs_call"].pop("inputs") if "decoder_cached_states" in kwargs["kwargs_call"]: warnings.warn( "The `decoder_cached_states` argument is deprecated and will be removed in a future version, use" " `past_key_values` instead.", FutureWarning, ) output["past_key_values"] = kwargs["kwargs_call"].pop("decoder_cached_states") if "past" in kwargs["kwargs_call"] and "past_key_values" in parameter_names: warnings.warn( "The `past` argument is deprecated and will be removed in a future version, use `past_key_values`" " instead.", FutureWarning, ) kwargs["past_key_values"] = kwargs["kwargs_call"].pop("past") elif "past_key_values" in kwargs["kwargs_call"] and "past" in parameter_names: kwargs["past"] = kwargs["kwargs_call"].pop("past_key_values") if has_kwargs: output["kwargs"] = kwargs.pop("kwargs_call", {}) else: if len(kwargs["kwargs_call"]) > 0: raise ValueError( "The following keyword arguments are not supported by this model:" f" {list(kwargs['kwargs_call'].keys())}." ) kwargs.pop("kwargs_call") for k, v in kwargs.items(): if isinstance(v, allowed_types) or tf.is_tensor(v) or v is None: output[k] = v else: raise ValueError(f"Data of type {type(v)} is not allowed only {allowed_types} is accepted for {k}.") if isinstance(main_input, (tuple, list)): for i, input in enumerate(main_input): # EagerTensors don't allow to use the .name property so we check for a real Tensor if is_tf_symbolic_tensor(input): # Tensor names have always the pattern `name:id` then we check only the # `name` part tensor_name = input.name.split(":")[0] if tensor_name in parameter_names: output[tensor_name] = input else: output[parameter_names[i]] = input elif isinstance(input, allowed_types) or input is None: output[parameter_names[i]] = input else: raise ValueError( f"Data of type {type(input)} is not allowed only {allowed_types} is accepted for" f" {parameter_names[i]}." ) elif isinstance(main_input, Mapping): if "inputs" in main_input: warnings.warn( "The `inputs` argument is deprecated and will be removed in a future version, use `input_ids`" " instead.", FutureWarning, ) output["input_ids"] = main_input.pop("inputs") if "decoder_cached_states" in main_input: warnings.warn( "The `decoder_cached_states` argument is deprecated and will be removed in a future version, use" " `past_key_values` instead.", FutureWarning, ) output["past_key_values"] = main_input.pop("decoder_cached_states") for k, v in dict(main_input).items(): if isinstance(v, allowed_types) or v is None: output[k] = v elif k not in parameter_names and "args" not in parameter_names: logger.warning( f"The parameter {k} does not belongs to the parameter list {parameter_names} and will be ignored." 
) continue else: raise ValueError(f"Data of type {type(v)} is not allowed only {allowed_types} is accepted for {k}.") else: if tf.is_tensor(main_input) or main_input is None: output[main_input_name] = main_input else: raise ValueError( f"Data of type {type(main_input)} is not allowed only {allowed_types} is accepted for" f" {main_input_name}." ) # Populates any unspecified argument with their default value, according to the signature. for name in parameter_names: if name not in list(output.keys()) and name != "args": output[name] = kwargs.pop(name, signature[name].default) # When creating a SavedModel TF calls the method with LayerCall.__call__(args, **kwargs) # So to respect the proper output we have to add this exception if "args" in output: if output["args"] is not None and is_tf_symbolic_tensor(output["args"]): tensor_name = output["args"].name.split(":")[0] output[tensor_name] = output["args"] else: # `args` in this case is always the first parameter, then `input_ids` output["input_ids"] = output["args"] del output["args"] if "kwargs" in output: del output["kwargs"] cast_output = {} for key, val in output.items(): if isinstance(val, tf.Tensor) and val.dtype == tf.int64: cast_output[key] = tf.cast(val, tf.int32) elif isinstance(val, np.ndarray) and val.dtype == np.int64: cast_output[key] = val.astype(np.int32) else: cast_output[key] = val output = cast_output del cast_output if config is not None: boolean_dict = { k: v for k, v in output.items() if k in ["return_dict", "output_attentions", "output_hidden_states", "use_cache"] } output.update( booleans_processing( config=config, **boolean_dict, ) ) return output def dtype_byte_size(dtype): """ Returns the size (in bytes) occupied by one parameter of type `dtype`. Example: ```py >>> dtype_byte_size(tf.float32) 4 ``` """ if dtype == tf.bool: return 1 / 8 bit_search = re.search(r"[^\d](\d+)$", dtype.name) if bit_search is None: raise ValueError(f"`dtype` is not a valid dtype: {dtype}.") bit_size = int(bit_search.groups()[0]) return bit_size // 8 def strip_model_name_and_prefix(name, _prefix=None): if _prefix is not None and name.startswith(_prefix): name = name[len(_prefix) :] if name.startswith("/"): name = name[1:] if "model." not in name and len(name.split("/")) > 1: name = "/".join(name.split("/")[1:]) return name def tf_shard_checkpoint(weights, max_shard_size="10GB", weights_name: str = TF2_WEIGHTS_NAME): """ Splits a model state dictionary in sub-checkpoints so that the final size of each sub-checkpoint does not exceed a given size. The sub-checkpoints are determined by iterating through the `state_dict` in the order of its keys, so there is no optimization made to make each sub-checkpoint as close as possible to the maximum size passed. For example, if the limit is 10GB and we have weights of sizes [6GB, 6GB, 2GB, 6GB, 2GB, 2GB] they will get sharded as [6GB], [6+2GB], [6+2+2GB] and not [6+2+2GB], [6+2GB], [6GB]. <Tip warning={true}> If one of the model's weight is bigger that `max_shard_size`, it will end up in its own sub-checkpoint which will have a size greater than `max_shard_size`. </Tip> Args: weights (`Dict[str, tf.RessourceVariable]`): The list of tf.RessourceVariable of a model to save. max_shard_size (`int` or `str`, *optional*, defaults to `"10GB"`): The maximum size of each sub-checkpoint. If expressed as a string, needs to be digits followed by a unit (like `"5MB"`). 
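        weights_name (`str`, *optional*, defaults to `TF2_WEIGHTS_NAME`):
            The base name of the checkpoint file; the shard file names are derived from it.

    For illustration, a sketch of the returned structure when more than one shard is needed (the shard file names
    below simply follow the default naming pattern and are not taken from a real checkpoint):

    ```py
    >>> shards, index = tf_shard_checkpoint(model.weights, max_shard_size="5GB")
    >>> # shards -> {"tf_model-00001-of-00002.h5": [<tf.Variable ...>, ...], "tf_model-00002-of-00002.h5": [...]}
    >>> # index  -> {"metadata": {"total_size": ...}, "weight_map": {"model/embeddings/weight:0": "tf_model-00001-of-00002.h5", ...}}
    ```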
""" max_shard_size = convert_file_size_to_int(max_shard_size) sharded_state_dicts = [] current_block = [] current_block_size = 0 total_size = 0 for item in weights: weight_size = item.numpy().size * dtype_byte_size(item.dtype) # If this weight is going to tip up over the maximal size, we split. if current_block_size + weight_size > max_shard_size: sharded_state_dicts.append(current_block) current_block = [] current_block_size = 0 current_block.append(item) current_block_size += weight_size total_size += weight_size # Add the last block sharded_state_dicts.append(current_block) # If we only have one shard, we return it if len(sharded_state_dicts) == 1: return {weights_name: sharded_state_dicts[0]}, None # Otherwise, let's build the index weight_map = {} shards = {} for idx, shard in enumerate(sharded_state_dicts): shard_file = weights_name.replace(".h5", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.h5") shard_file = shard_file.replace( ".safetensors", f"-{idx + 1:05d}-of-{len(sharded_state_dicts):05d}.safetensors" ) shards[shard_file] = shard for weight in shard: weight_name = weight.name weight_map[weight_name] = shard_file # Add the metadata metadata = {"total_size": total_size} index = {"metadata": metadata, "weight_map": weight_map} return shards, index def load_tf_sharded_weights(model, shard_files, ignore_mismatched_sizes=False, strict=False, _prefix=None): """ This is the same as `load_tf_weights` but for a sharded checkpoint. Detect missing and unexpected layers and load the TF weights from the shard file accordingly to their names and shapes. This load is performed efficiently: each checkpoint shard is loaded one by one in RAM and deleted after being loaded in the model. Args: model (`keras.models.Model`): The model in which to load the checkpoint. shard_files (`str` or `os.PathLike`): A list containing the sharded checkpoint names. ignore_mismatched_sizes`bool`, *optional`, defaults to `True`): Whether or not to ignore the mismatch between the sizes strict (`bool`, *optional*, defaults to `True`): Whether to strictly enforce that the keys in the model state dict match the keys in the sharded checkpoint. Returns: Three lists, one for the missing layers, another one for the unexpected layers, and a last one for the mismatched layers. """ # Load the index unexpected_keys = set() saved_keys = set() mismatched_keys = set() # Since TF adds the name of the class to its weights, and uses the index and not the name of the layer to load # the weight, we have to get rid of the first prefix of the name of the layer. model_keys = set() model_layer_map = {} for i, k in enumerate(model.weights): layer_name = k.name if _prefix is not None and layer_name.startswith(_prefix): layer_name = layer_name[len(_prefix) :] layer_name = layer_name.lstrip("/") if not ("model." 
in layer_name or len(layer_name.split("/")) == 1): layer_name = "/".join(layer_name.split("/")[1:]) model_keys.add(layer_name) model_layer_map[layer_name] = i for shard_file in shard_files: saved_weight_names_set, unexpected_keys_set, mismatched_keys_set = load_tf_shard( model, model_layer_map, shard_file, ignore_mismatched_sizes=ignore_mismatched_sizes, _prefix=_prefix, ) saved_keys.update(saved_weight_names_set) unexpected_keys.update(unexpected_keys_set) mismatched_keys.update(mismatched_keys_set) gc.collect() missing_keys = model_keys - saved_keys if strict and (len(missing_keys) > 0 or len(unexpected_keys) > 0): error_message = f"Error(s) in loading state_dict for {model.__class__.__name__}" if len(missing_keys) > 0: str_missing_keys = ",".join([f'"{k}"' for k in missing_keys]) error_message += f"\nMissing key(s): {str_missing_keys}." if len(unexpected_keys) > 0: str_unexpected_keys = ",".join([f'"{k}"' for k in unexpected_keys]) error_message += f"\nMissing key(s): {str_unexpected_keys}." raise RuntimeError(error_message) return missing_keys, unexpected_keys, mismatched_keys def load_tf_shard(model, model_layer_map, resolved_archive_file, ignore_mismatched_sizes=False, _prefix=None): """ Loads a shard from a sharded checkpoint file. Can be either H5 or Safetensors. Handles missing keys and unexpected keys. Args: model (`keras.models.Model`): Model in which the weights are loaded model_layer_map (`Dict`): A dictionary mapping the layer name to the index of the layer in the model. resolved_archive_file (`str`): Path to the checkpoint file from which the weights will be loaded ignore_mismatched_sizes (`bool`, *optional*, defaults to `False`): Whether to ignore the mismatched keys Returns: `keras.models.Model`: Three lists, one for the layers that were found and succesfully restored (from the shard file), one for the mismatched layers, and another one for the unexpected layers. """ saved_weight_names_set = set() saved_weights = {} mismatched_keys = set() unexpected_keys = set() # Read the H5 file try: with h5py.File(resolved_archive_file, "r") as sharded_checkpoint_file: # Retrieve the name of each layer from the H5 file saved_h5_model_layers_name = set(load_attributes_from_hdf5_group(sharded_checkpoint_file, "layer_names")) weight_value_tuples = [] # Compute missing and unexpected sub layers # Store the weights in list of tuples that looks like [(weight_object, value_of_weight),...] 
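            # For each saved layer name: record it, then look it up in `model_layer_map` (which maps stripped
            # weight names to their index in `model.weights`). Unmatched names are collected as unexpected keys;
            # matched weights are reshaped if their shape differs (or recorded as mismatched when
            # `ignore_mismatched_sizes=True`), and everything is assigned in a single `K.batch_set_value` call.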
for layer_name in saved_h5_model_layers_name: h5_layer_object = sharded_checkpoint_file[layer_name] saved_weights[layer_name] = np.asarray(h5_layer_object) saved_weight_names_set.add(layer_name) if layer_name not in model_layer_map: unexpected_keys.add(layer_name) else: symbolic_weight = model.weights[model_layer_map[layer_name]] saved_weight_value = saved_weights[layer_name] # If the current weight is found if saved_weight_value is not None: # Check if the shape of the current weight and the one from the H5 file are different if K.int_shape(symbolic_weight) != saved_weight_value.shape: # If yes we reshape the weight from the H5 file accordingly to the current weight # If the two shapes are not compatible we raise an issue try: array = np.reshape(saved_weight_value, K.int_shape(symbolic_weight)) except ValueError as e: if ignore_mismatched_sizes: mismatched_keys.add( (layer_name, saved_weight_value.shape, K.int_shape(symbolic_weight)) ) continue else: raise e else: array = saved_weight_value # We create the tuple that will be loaded and add it to the final list weight_value_tuples.append((symbolic_weight, array)) K.batch_set_value(weight_value_tuples) return saved_weight_names_set, unexpected_keys, mismatched_keys except Exception as e: try: with open(resolved_archive_file) as f: if f.read().startswith("version"): raise OSError( "You seem to have cloned a repository without having git-lfs installed. Please install " "git-lfs and run `git lfs install` followed by `git lfs pull` in the folder " "you cloned." ) else: raise ValueError( f"Unable to locate the file {resolved_archive_file} which is necessary to load this pretrained" " model. Make sure you have saved the model properly." ) from e except (UnicodeDecodeError, ValueError): raise OSError( f"Unable to load weights from TF checkpoint file for '{resolved_archive_file}' " f"at '{resolved_archive_file}'. " "If you tried to load a TF model from a sharded checkpoint, you should try converting the model " "by loading it in pytorch and saving it localy. A convertion script should be realeased soon." ) def load_tf_sharded_weights_from_safetensors( model, shard_files, ignore_mismatched_sizes=False, strict=False, _prefix=None ): """ This is the same as `load_tf_weights_from_safetensors` but for a sharded TF-format safetensors checkpoint. Detect missing and unexpected layers and load the TF weights from the shard file accordingly to their names and shapes. This load is performed efficiently: each checkpoint shard is loaded one by one in RAM and deleted after being loaded in the model. Args: model (`keras.models.Model`): The model in which to load the checkpoint. shard_files (`str` or `os.PathLike`): A list containing the sharded checkpoint names. ignore_mismatched_sizes`bool`, *optional`, defaults to `True`): Whether or not to ignore the mismatch between the sizes strict (`bool`, *optional*, defaults to `True`): Whether to strictly enforce that the keys in the model state dict match the keys in the sharded checkpoint. Returns: Three lists, one for the missing layers, another one for the unexpected layers, and a last one for the mismatched layers. 
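    Note that a layer is only reported as missing if it is absent from every shard, since each shard usually
    contains only a subset of the model's weights.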
""" # Load the index unexpected_keys = set() all_missing_keys = [] mismatched_keys = set() for shard_file in shard_files: missing_layers, unexpected_layers, mismatched_layers = load_tf_weights_from_safetensors( model, shard_file, ignore_mismatched_sizes=ignore_mismatched_sizes, _prefix=_prefix, ) all_missing_keys.append(set(missing_layers)) unexpected_keys.update(unexpected_layers) mismatched_keys.update(mismatched_layers) gc.collect() missing_keys = set.intersection(*all_missing_keys) if strict and (len(missing_keys) > 0 or len(unexpected_keys) > 0): error_message = f"Error(s) in loading state_dict for {model.__class__.__name__}" if len(missing_keys) > 0: str_missing_keys = ",".join([f'"{k}"' for k in missing_keys]) error_message += f"\nMissing key(s): {str_missing_keys}." if len(unexpected_keys) > 0: str_unexpected_keys = ",".join([f'"{k}"' for k in unexpected_keys]) error_message += f"\nMissing key(s): {str_unexpected_keys}." raise RuntimeError(error_message) return missing_keys, unexpected_keys, mismatched_keys def load_tf_weights(model, resolved_archive_file, ignore_mismatched_sizes=False, _prefix=None): """ Detect missing and unexpected layers and load the TF weights from the shard file accordingly to their names and shapes. Args: model (`keras.models.Model`): The model to load the weights into. resolved_archive_file (`str`): The location of the H5 file. ignore_mismatched_sizes (`bool`, *optional*, defaults to `False`): Whether or not to ignore weights with shapes that don't match between the checkpoint of the model. Returns: Three lists, one for the missing layers, another one for the unexpected layers, and a last one for the mismatched layers. """ if resolved_archive_file.endswith(".safetensors"): load_function = load_tf_weights_from_safetensors else: load_function = load_tf_weights_from_h5 return load_function( model, resolved_archive_file, ignore_mismatched_sizes=ignore_mismatched_sizes, _prefix=_prefix ) def load_tf_weights_from_h5(model, resolved_archive_file, ignore_mismatched_sizes=False, _prefix=None): mismatched_layers = [] # Read the H5 file with h5py.File(resolved_archive_file, "r") as sharded_checkpoint_file: # Retrieve the name of each layer from the H5 file saved_h5_model_layers_name = set(load_attributes_from_hdf5_group(sharded_checkpoint_file, "layer_names")) # Find the missing layers from the high level list of layers missing_layers = list({layer.name for layer in model.layers} - saved_h5_model_layers_name) # Find the unexpected layers from the high level list of layers unexpected_layers = list(saved_h5_model_layers_name - {layer.name for layer in model.layers}) saved_weight_names_set = set() symbolic_weights_names = set() weight_value_tuples = [] # Compute missing and unexpected sub layers # Store the weights in list of tuples that looks like [(weight_object, value_of_weight),...] 
for layer in model.layers: # if layer_name from the H5 file belongs to the layers from the instantiated model if layer.name in saved_h5_model_layers_name: # Get the H5 layer object from its name h5_layer_object = sharded_checkpoint_file[layer.name] # Get all the weights as a list from the layer object symbolic_weights = layer.trainable_weights + layer.non_trainable_weights saved_weights = {} # Create a dict from the H5 saved model that looks like {"weight_name": weight_value} # And a set with only the names for weight_name in load_attributes_from_hdf5_group(h5_layer_object, "weight_names"): # TF names always start with the model name so we ignore it name = "/".join(weight_name.split("/")[1:]) if _prefix is not None: name = _prefix + "/" + name saved_weights[name] = np.asarray(h5_layer_object[weight_name]) # Add the updated name to the final list for computing missing/unexpected values saved_weight_names_set.add(name) # Loop over each weights from the instantiated model and compare with the weights from the H5 file for symbolic_weight in symbolic_weights: # TF names always start with the model name so we ignore it if _prefix is not None: delimeter = len(_prefix.split("/")) symbolic_weight_name = "/".join( symbolic_weight.name.split("/")[:delimeter] + symbolic_weight.name.split("/")[delimeter + 1 :] ) else: symbolic_weight_name = "/".join(symbolic_weight.name.split("/")[1:]) # here we check if the current weight is among the weights from the H5 file # If yes, get the weight_value of the corresponding weight from the H5 file # If not, make the value to None saved_weight_value = saved_weights.get(symbolic_weight_name, None) # Retrocompatibility patch: some embeddings are stored with the weights name (e.g. Bart's # `model.shared/embeddings:0` are stored as `model.shared/weights:0`) if saved_weight_value is None and symbolic_weight_name.endswith("embeddings:0"): symbolic_weight_name = symbolic_weight_name[:-12] + "weight:0" saved_weight_value = saved_weights.get(symbolic_weight_name, None) # Add the updated name to the final list for computing missing/unexpected values symbolic_weights_names.add(symbolic_weight_name) # If the current weight is found if saved_weight_value is not None: # Check if the shape of the current weight and the one from the H5 file are different if K.int_shape(symbolic_weight) != saved_weight_value.shape: # If yes we reshape the weight from the H5 file accordingly to the current weight # If the two shapes are not compatible we raise an issue try: array = np.reshape(saved_weight_value, K.int_shape(symbolic_weight)) except ValueError as e: if ignore_mismatched_sizes: mismatched_layers.append( (symbolic_weight_name, saved_weight_value.shape, K.int_shape(symbolic_weight)) ) continue else: raise e else: array = saved_weight_value # We create the tuple that will be loaded and add it to the final list weight_value_tuples.append((symbolic_weight, array)) # Load all the weights K.batch_set_value(weight_value_tuples) # Compute the missing and unexpected layers missing_layers.extend(list(symbolic_weights_names - saved_weight_names_set)) unexpected_layers.extend(list(saved_weight_names_set - symbolic_weights_names)) return missing_layers, unexpected_layers, mismatched_layers def load_tf_weights_from_safetensors(model, resolved_archive_file, ignore_mismatched_sizes=False, _prefix=None): # Read the safetensors file with safe_open(resolved_archive_file, framework="tf") as safetensors_archive: mismatched_layers = [] weight_names = [strip_model_name_and_prefix(w.name, _prefix=_prefix) for 
w in model.weights] loaded_weight_names = list(safetensors_archive.keys()) # Find the missing layers from the high level list of layers missing_layers = list(set(weight_names) - set(loaded_weight_names)) # Find the unexpected layers from the high level list of layers unexpected_layers = list(set(loaded_weight_names) - set(weight_names)) for weight in model.weights: weight_name = strip_model_name_and_prefix(weight.name, _prefix=_prefix) if weight_name in loaded_weight_names: weight_value = safetensors_archive.get_tensor(weight_name) # Check if the shape of the current weight and the one from the H5 file are different if K.int_shape(weight) != weight_value.shape: # If yes we reshape the weight from the H5 file accordingly to the current weight # If the two shapes are not compatible we raise an issue try: weight_value = tf.reshape(weight_value, K.int_shape(weight)) except (ValueError, tf.errors.InvalidArgumentError) as e: if ignore_mismatched_sizes: mismatched_layers.append((weight_name, weight_value.shape, K.int_shape(weight))) continue else: raise e K.set_value(weight, weight_value) # weight.assign() might break if weight is a DTensor return missing_layers, unexpected_layers, mismatched_layers def init_copy_embeddings(old_embeddings, new_num_tokens): r""" This function aims to reduce the embeddings in case new_num_tokens < old_num_tokens or to pad with -1 in case new_num_tokens > old_num_tokens. A mask is also computed in order to know which weight in the embeddings should be kept or not. Example: - if new_num_tokens=5 and old_num_tokens=4 and old_embeddings=[w1,w2,w3,w4] - mask=[True,True,True,True,False] and current_weights=[w1,w2,w3,w4,-1] - if new_num_tokens=4 and old_num_tokens=5 and old_embeddings=[w1,w2,w3,w4,w5] - mask=[True,True,True,True] and current_weights=[w1,w2,w3,w4] """ old_num_tokens, old_embedding_dim = shape_list(old_embeddings) size_diff = new_num_tokens - old_num_tokens # initialize new embeddings # Copy token embeddings from the previous ones if tf.math.greater(size_diff, 0): # if the new size is greater than the old one, we extend the current embeddings with a padding until getting new size # and we create a mask to properly identify the padded values and be replaced by the values of the newly created # embeddings current_weights = tf.pad( old_embeddings.value(), tf.convert_to_tensor([[0, size_diff], [0, 0]]), constant_values=-1 ) num_tokens_to_copy = min(old_num_tokens, new_num_tokens) mask = tf.fill(tf.convert_to_tensor([num_tokens_to_copy, 1]), True) mask = tf.pad(mask, tf.convert_to_tensor([[0, size_diff], [0, 0]]), constant_values=False) else: # if the new size if lower than the old one, we take the current embeddings until the new size current_weights = tf.slice( old_embeddings.value(), tf.convert_to_tensor([0, 0]), tf.convert_to_tensor([new_num_tokens, old_embedding_dim]), ) mask = tf.fill(tf.convert_to_tensor([new_num_tokens, 1]), True) return mask, current_weights class TFPreTrainedModel(keras.Model, TFModelUtilsMixin, TFGenerationMixin, PushToHubMixin): r""" Base class for all TF models. [`TFPreTrainedModel`] takes care of storing the configuration of the models and handles methods for loading, downloading and saving models as well as a few methods common to all models to: - resize the input embeddings, - prune heads in the self-attention heads. Class attributes (overridden by derived classes): - **config_class** ([`PretrainedConfig`]) -- A subclass of [`PretrainedConfig`] to use as configuration class for this model architecture. 
- **base_model_prefix** (`str`) -- A string indicating the attribute associated to the base model in derived classes of the same architecture adding modules on top of the base model. - **main_input_name** (`str`) -- The name of the principal input to the model (often `input_ids` for NLP models, `pixel_values` for vision models and `input_values` for speech models). """ config_class = None base_model_prefix = "" main_input_name = "input_ids" _auto_class = None _using_dummy_loss = None _label_to_output_map = None # a list of re pattern of tensor names to ignore from the model when loading the model weights # (and avoid unnecessary warnings). _keys_to_ignore_on_load_missing = None # a list of re pattern of tensor names to ignore from the weights when loading the model weights # (and avoid unnecessary warnings). _keys_to_ignore_on_load_unexpected = None _requires_load_weight_prefix = False @property def dummy_inputs(self) -> Dict[str, tf.Tensor]: """ Dummy inputs to build the network. Returns: `Dict[str, tf.Tensor]`: The dummy inputs. """ dummies = {} for key, spec in self.input_signature.items(): # 2 is the most correct arbitrary size. I will not be taking questions dummy_shape = [dim if dim is not None else 2 for dim in spec.shape] if spec.shape[0] is None: # But let's make the batch size 1 to save memory anyway dummy_shape[0] = 1 dummies[key] = tf.ones(shape=dummy_shape, dtype=spec.dtype) if key == "token_type_ids": # Some models have token_type_ids but with a vocab_size of 1 dummies[key] = tf.zeros_like(dummies[key]) if self.config.add_cross_attention and "encoder_hidden_states" in inspect.signature(self.call).parameters: if "encoder_hidden_states" not in dummies: if self.main_input_name == "input_ids": dummies["encoder_hidden_states"] = tf.ones( shape=(1, 2, self.config.hidden_size), dtype=tf.float32, name="encoder_hidden_states" ) else: raise NotImplementedError( "Model has cross-attention but we couldn't infer the shape for the encoder hidden states. Please manually override dummy_inputs!" ) return dummies def build_in_name_scope(self): with tf.name_scope(self.name): self.build(input_shape=None) @property def framework(self) -> str: """ :str: Identifies that this is a TensorFlow model. """ return "tf" def build(self, input_shape=None): pass # This is just here to make sure we don't call the superclass build() def __init__(self, config, *inputs, **kwargs): super().__init__(*inputs, **kwargs) if not isinstance(config, PretrainedConfig): raise TypeError( f"Parameter config in `{self.__class__.__name__}(config)` should be an instance of class " "`PretrainedConfig`. 
To create a model from a pretrained model use " f"`model = {self.__class__.__name__}.from_pretrained(PRETRAINED_MODEL_NAME)`" ) # Save config and origin of the pretrained weights if given in model self.config = config self.name_or_path = config.name_or_path self.generation_config = GenerationConfig.from_model_config(config) if self.can_generate() else None self._set_save_spec(self.input_signature) def get_config(self): return self.config.to_dict() @functools.wraps(keras.Model.fit) def fit(self, *args, **kwargs): args, kwargs = convert_batch_encoding(*args, **kwargs) return super().fit(*args, **kwargs) @functools.wraps(keras.Model.train_on_batch) def train_on_batch(self, *args, **kwargs): args, kwargs = convert_batch_encoding(*args, **kwargs) return super().train_on_batch(*args, **kwargs) @functools.wraps(keras.Model.test_on_batch) def test_on_batch(self, *args, **kwargs): args, kwargs = convert_batch_encoding(*args, **kwargs) return super().test_on_batch(*args, **kwargs) @functools.wraps(keras.Model.predict_on_batch) def predict_on_batch(self, *args, **kwargs): args, kwargs = convert_batch_encoding(*args, **kwargs) return super().predict_on_batch(*args, **kwargs) @functools.wraps(keras.Model.predict) def predict(self, *args, **kwargs): args, kwargs = convert_batch_encoding(*args, **kwargs) return super().predict(*args, **kwargs) @functools.wraps(keras.Model.evaluate) def evaluate(self, *args, **kwargs): args, kwargs = convert_batch_encoding(*args, **kwargs) return super().evaluate(*args, **kwargs) @classmethod def from_config(cls, config, **kwargs): if isinstance(config, PretrainedConfig): return cls._from_config(config, **kwargs) return cls._from_config(cls.config_class.from_dict(config, **kwargs)) @classmethod def _from_config(cls, config, **kwargs): """ All context managers that the model should be initialized under go here. """ return cls(config, **kwargs) def get_head_mask(self, head_mask: tf.Tensor | None, num_hidden_layers: int) -> tf.Tensor: """ Prepare the head mask if needed. Args: head_mask (`tf.Tensor` with shape `[num_heads]` or `[num_hidden_layers x num_heads]`, *optional*): The mask indicating if we should keep the heads or not (1.0 for keep, 0.0 for discard). num_hidden_layers (`int`): The number of hidden layers in the model. Returns: `tf.Tensor` with shape `[num_hidden_layers x batch x num_heads x seq_length x seq_length]` or list with `[None]` for each layer. """ if head_mask is not None: head_mask = self._convert_head_mask_to_5d(head_mask, num_hidden_layers) else: head_mask = [None] * num_hidden_layers return head_mask def _convert_head_mask_to_5d(self, head_mask, num_hidden_layers): """-> [num_hidden_layers x batch x num_heads x seq_length x seq_length]""" if head_mask.shape.rank == 1: head_mask = head_mask[None, None, :, None, None] head_mask = tf.repeat(head_mask, repeats=num_hidden_layers, axis=0) elif head_mask.shape.rank == 2: head_mask = head_mask[:, None, :, None, None] assert head_mask.shape.rank == 5, f"head_mask.dim != 5, instead {head_mask.dim()}" head_mask = tf.cast(head_mask, tf.float32) # switch to float if need + fp16 compatibility return head_mask @tf.function def serving(self, inputs): """ Args: Method used for serving the model. Does not have a specific signature, but will be specialized as concrete functions when saving with `save_pretrained`. inputs (`Dict[str, tf.Tensor]`): The input of the saved model as a dictionary of tensors. 
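        In practice, the concrete serving signatures are derived from `self.input_signature` when the model is
        exported with `save_pretrained(..., saved_model=True)`.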
""" output = self.call(inputs) return self.serving_output(output) @property def input_signature(self) -> Dict[str, tf.TensorSpec]: """ This property should return a dict mapping input names to tf.TensorSpec objects, representing the expected shape and dtype for model inputs. It is used for both serving and for generating dummy inputs. """ model_inputs = list(inspect.signature(self.call).parameters) sig = {} if "input_ids" in model_inputs: if self.__class__.__name__.endswith("ForMultipleChoice"): text_dims = 3 else: text_dims = 2 for input_name in ( "input_ids", "attention_mask", "token_type_ids", "decoder_input_ids", "decoder_attention_mask", ): if input_name in model_inputs: sig[input_name] = tf.TensorSpec([None] * text_dims, tf.int32, name=input_name) if "pixel_values" in model_inputs: pixel_values_shape = [None, None, None, None] if hasattr(self.config, "vision_config"): vision_config = self.config.vision_config else: vision_config = self.config if hasattr(vision_config, "num_channels"): pixel_values_shape[1] = vision_config.num_channels else: raise NotImplementedError( "Could not infer number of channels from config, please override input_signature to specify input shapes." ) if hasattr(vision_config, "image_size"): pixel_values_shape[2] = pixel_values_shape[3] = vision_config.image_size elif hasattr(vision_config, "input_size"): pixel_values_shape[2] = pixel_values_shape[3] = vision_config.input_size else: raise NotImplementedError( "Could not infer input image shape from config, please override input_signature to specify input shapes." ) sig["pixel_values"] = tf.TensorSpec(pixel_values_shape, tf.float32, name="pixel_values") if "input_features" in model_inputs: raise NotImplementedError("Audio models need a manually defined input_signature") return sig def serving_output(self, output): """ Prepare the output of the saved model. Can be overridden if specific serving modifications are required. """ if not isinstance(output, ModelOutput): return output for key in output: if key.endswith("hidden_states") and not getattr(self.config, "output_hidden_states", False): output[key] = None elif key.endswith("attentions") and not getattr(self.config, "output_attentions", False): output[key] = None elif key == "past_key_values" and not getattr(self.config, "use_cache", False): output[key] = None elif key == "cross_attentions" and not ( getattr(self.config, "output_attentions", False) and getattr(self.config, "add_cross_attention", False) ): output[key] = None if isinstance(output[key], (tuple, list)): try: output[key] = tf.convert_to_tensor(output[key]) except (ValueError, tf.errors.InvalidArgumentError): pass # Layers may not have the same dimensions return output @classmethod def can_generate(cls) -> bool: """ Returns whether this model can generate sequences with `.generate()`. Returns: `bool`: Whether this model can generate sequences with `.generate()`. """ # Detects whether `prepare_inputs_for_generation` has been overwritten, which is a requirement for generation. # Alternativelly, the model can also have a custom `generate` function. if "GenerationMixin" in str(cls.prepare_inputs_for_generation) and "GenerationMixin" in str(cls.generate): return False return True def get_input_embeddings(self) -> keras.layers.Layer: """ Returns the model's input embeddings layer. Returns: `tf.Variable`: The embeddings layer mapping vocabulary to hidden states. 
""" main_layer = getattr(self, self.base_model_prefix, self) if main_layer is not self: return main_layer.get_input_embeddings() else: raise NotImplementedError def _save_checkpoint(self, checkpoint_dir, epoch): if not os.path.isdir(checkpoint_dir): os.mkdir(checkpoint_dir) # We avoid tf.train.checkpoint or saving weights in TF format, even though that includes optimizer # state for us, because it requires special handling for objects like custom losses, which we use # internally and which users are likely to use too weights_path = os.path.join(checkpoint_dir, "weights.h5") self.save_weights(weights_path) extra_data = {"epoch": epoch, "optimizer_state": self.optimizer.get_weights()} extra_data_path = os.path.join(checkpoint_dir, "extra_data.pickle") with open(extra_data_path, "wb") as f: pickle.dump(extra_data, f) def prepare_tf_dataset( self, dataset: "datasets.Dataset", # noqa:F821 batch_size: int = 8, shuffle: bool = True, tokenizer: Optional["PreTrainedTokenizerBase"] = None, collate_fn: Optional[Callable] = None, collate_fn_args: Optional[Dict[str, Any]] = None, drop_remainder: Optional[bool] = None, prefetch: bool = True, ): """ Wraps a HuggingFace [`~datasets.Dataset`] as a `tf.data.Dataset` with collation and batching. This method is designed to create a "ready-to-use" dataset that can be passed directly to Keras methods like `fit()` without further modification. The method will drop columns from the dataset if they don't match input names for the model. If you want to specify the column names to return rather than using the names that match this model, we recommend using `Dataset.to_tf_dataset()` instead. Args: dataset (`Any`): A [~`datasets.Dataset`] to be wrapped as a `tf.data.Dataset`. batch_size (`int`, *optional*, defaults to 8): The size of batches to return. shuffle (`bool`, defaults to `True`): Whether to return samples from the dataset in random order. Usually `True` for training datasets and `False` for validation/test datasets. tokenizer ([`PreTrainedTokenizerBase`], *optional*): A `PreTrainedTokenizer` that will be used to pad samples to create batches. Has no effect if a specific `collate_fn` is passed instead. collate_fn (`Callable`, *optional*): A function that collates samples from the dataset into a single batch. Defaults to `DefaultDataCollator` if no `tokenizer` is supplied or `DataCollatorWithPadding` if a `tokenizer` is passed. collate_fn_args (`Dict[str, Any]`, *optional*): A dict of arguments to pass to the `collate_fn` alongside the list of samples. drop_remainder (`bool`, *optional*): Whether to drop the final batch, if the batch_size does not evenly divide the dataset length. Defaults to the same setting as `shuffle`. prefetch (`bool`, defaults to `True`): Whether to add prefetching to the end of the `tf.data` pipeline. This is almost always beneficial for performance, but can be disabled in edge cases. Returns: `Dataset`: A `tf.data.Dataset` which is ready to pass to the Keras API. 
""" requires_backends(self, ["datasets"]) import datasets if collate_fn is None: if tokenizer is None: collate_fn = DefaultDataCollator(return_tensors="np") else: collate_fn = DataCollatorWithPadding(tokenizer=tokenizer, return_tensors="np") if collate_fn_args is None: collate_fn_args = {} if not isinstance(dataset, datasets.Dataset): raise TypeError("Dataset argument should be a datasets.Dataset!") model_inputs = list(inspect.signature(self.call).parameters) model_labels = find_labels(self.__class__) if "cols_to_retain" in list(inspect.signature(dataset._get_output_signature).parameters.keys()): output_signature, _ = dataset._get_output_signature( dataset, batch_size=None, collate_fn=collate_fn, collate_fn_args=collate_fn_args, cols_to_retain=model_inputs, ) else: # TODO Matt: This is a workaround for older versions of datasets that are missing the `cols_to_retain` # argument. We should remove this once the minimum supported version of datasets is > 2.3.2 unwanted_columns = [ feature for feature in dataset.features if feature not in model_inputs and feature not in ("label_ids", "label") ] dataset = dataset.remove_columns(unwanted_columns) output_signature, _ = dataset._get_output_signature( dataset, batch_size=None, collate_fn=collate_fn, collate_fn_args=collate_fn_args ) output_columns = list(output_signature.keys()) feature_cols = [col for col in output_columns if col in model_inputs and col not in model_labels] label_cols = [col for col in output_columns if col in model_labels] # Backwards compatibility for older versions of datasets. Previously, if `columns` or `label_cols` # were a single element list, the returned element spec would be a single element. Now, passing [feature] # will return a dict structure {"feature": feature}, and passing a single string will return a single element. feature_cols = feature_cols[0] if len(feature_cols) == 1 else feature_cols label_cols = label_cols[0] if len(label_cols) == 1 else label_cols if drop_remainder is None: drop_remainder = shuffle tf_dataset = dataset.to_tf_dataset( columns=feature_cols, label_cols=label_cols, batch_size=batch_size, shuffle=shuffle, drop_remainder=drop_remainder, collate_fn=collate_fn, collate_fn_args=collate_fn_args, prefetch=prefetch, ) return tf_dataset def compile( self, optimizer="rmsprop", loss="auto_with_warning", metrics=None, loss_weights=None, weighted_metrics=None, run_eagerly=None, steps_per_execution=None, **kwargs, ): """ This is a thin wrapper that sets the model's loss output head as the loss if the user does not specify a loss function themselves. """ if loss in ("auto_with_warning", "passthrough"): # "passthrough" for workflow backward compatibility logger.info( "No loss specified in compile() - the model's internal loss computation will be used as the " "loss. Don't panic - this is a common way to train TensorFlow models in Transformers! " "To disable this behaviour please pass a loss argument, or explicitly pass " "`loss=None` if you do not want your model to compute a loss. You can also specify `loss='auto'` to " "get the internal loss without printing this info string." 
) loss = "auto" if loss == "auto": loss = dummy_loss self._using_dummy_loss = True else: self._using_dummy_loss = False parent_args = list(inspect.signature(keras.Model.compile).parameters.keys()) # This argument got renamed, we need to support both versions if "steps_per_execution" in parent_args: super().compile( optimizer=optimizer, loss=loss, metrics=metrics, loss_weights=loss_weights, weighted_metrics=weighted_metrics, run_eagerly=run_eagerly, steps_per_execution=steps_per_execution, **kwargs, ) else: super().compile( optimizer=optimizer, loss=loss, metrics=metrics, loss_weights=loss_weights, weighted_metrics=weighted_metrics, run_eagerly=run_eagerly, experimental_steps_per_execution=steps_per_execution, **kwargs, ) def compute_loss(self, *args, **kwargs): if hasattr(keras.Model, "compute_loss"): # This will be true in TF 2.8 or greater return super().compute_loss(*args, **kwargs) else: warnings.warn( "The old compute_loss method is deprecated as it conflicts with the Keras compute_loss " "method added in TF 2.8. If you want the original HF compute_loss, please call " "hf_compute_loss() instead. From TF versions >= 2.8, or Transformers versions >= 5, " "calling compute_loss() will get the Keras method instead.", FutureWarning, ) return self.hf_compute_loss(*args, **kwargs) def get_label_to_output_name_mapping(self): arg_names = list(inspect.signature(self.call).parameters) if self._label_to_output_map is not None: return self._label_to_output_map elif "start_positions" in arg_names: return {"start_positions": "start_logits", "end_positions": "end_logits"} elif "sentence_order_label" in arg_names: return {"labels": "prediction_logits", "sentence_order_label": "sop_logits"} elif "next_sentence_label" in arg_names: return {"labels": "prediction_logits", "next_sentence_label": "seq_relationship_logits"} elif "mc_labels" in arg_names: return {"labels": "logits", "mc_labels": "mc_logits"} else: return {} def train_step(self, data): """ A modification of Keras's default `train_step` that correctly handles matching outputs to labels for our models and supports directly training on the loss output head. In addition, it ensures input keys are copied to the labels where appropriate. It will also copy label keys into the input dict when using the dummy loss, to ensure that they are available to the model during the forward pass. """ # We hardcode the most common renamings; models with weirder names can set `self._label_to_output_map` arg_names = list(inspect.signature(self.call).parameters) label_kwargs = find_labels(self.__class__) label_to_output = self.get_label_to_output_name_mapping() output_to_label = {val: key for key, val in label_to_output.items()} if not self._using_dummy_loss and parse(tf.__version__) < parse("2.11.0"): # Newer TF train steps leave this out data = expand_1d(data) x, y, sample_weight = keras.utils.unpack_x_y_sample_weight(data) # If the inputs are mutable dictionaries, make a shallow copy of them because we will modify # them during input/label pre-processing. This avoids surprising the user by wrecking their data. # In addition, modifying mutable Python inputs makes XLA compilation impossible. 
if isinstance(x, dict): x = x.copy() if isinstance(y, dict): y = y.copy() # When using a dummy loss, we ensure that separate labels are copied to the correct model arguments, # if those keys are not already present in the input dict if self._using_dummy_loss and y is not None: # If y is a tensor and the model only has one label-like input, map y to that input if len(label_kwargs) == 1 and isinstance(y, tf.Tensor): if isinstance(x, tf.Tensor): x = {arg_names[0]: x} label_kwarg = next(iter(label_kwargs)) if label_kwarg not in x: x[label_kwarg] = y # Otherwise, copy keys from y to x as long as they weren't already present in x elif isinstance(y, dict): if isinstance(x, tf.Tensor): x = {arg_names[0]: x} for key, val in y.items(): if key in arg_names and key not in x: x[key] = val elif output_to_label.get(key, None) in arg_names and key not in x: x[output_to_label[key]] = val if y is None: y = {key: val for key, val in x.items() if key in label_kwargs} if not y and not self._using_dummy_loss: raise ValueError("Could not find label column(s) in input dict and no separate labels were provided!") if isinstance(y, dict): # Rename labels at this point to match output heads y = {label_to_output.get(key, key): val for key, val in y.items()} # Run forward pass. with tf.GradientTape() as tape: if self._using_dummy_loss and "return_loss" in arg_names: y_pred = self(x, training=True, return_loss=True) else: y_pred = self(x, training=True) if self._using_dummy_loss: loss = self.compiled_loss(y_pred.loss, y_pred.loss, sample_weight, regularization_losses=self.losses) else: loss = None # This next block matches outputs to label keys. Tensorflow's standard method for doing this # can get very confused if any of the keys contain nested values (e.g. lists/tuples of Tensors) if isinstance(y, dict) and len(y) == 1: if list(y.keys())[0] in y_pred.keys(): y_pred = y_pred[list(y.keys())[0]] elif list(y_pred.keys())[0] == "loss": y_pred = y_pred[1] else: y_pred = y_pred[0] _, y = y.popitem() elif isinstance(y, dict): # If the labels are a dict, match keys from the output by name y_pred = {key: val for key, val in y_pred.items() if key in y} elif isinstance(y, tuple) or isinstance(y, list): # If the labels are a tuple/list, match keys to the output by order, skipping the loss. if list(y_pred.keys())[0] == "loss": y_pred = y_pred.to_tuple()[1:] else: y_pred = y_pred.to_tuple() y_pred = y_pred[: len(y)] # Remove unused fields in case those cause problems else: # If the labels are a single tensor, match them to the first non-loss tensor in the output if list(y_pred.keys())[0] == "loss": y_pred = y_pred[1] else: y_pred = y_pred[0] if loss is None: loss = self.compiled_loss(y, y_pred, sample_weight, regularization_losses=self.losses) # Run backwards pass. self.optimizer.minimize(loss, self.trainable_variables, tape=tape) self.compiled_metrics.update_state(y, y_pred, sample_weight) # Collect metrics to return return_metrics = {} for metric in self.metrics: result = metric.result() if isinstance(result, dict): return_metrics.update(result) else: return_metrics[metric.name] = result return return_metrics def test_step(self, data): """ A modification of Keras's default `train_step` that correctly handles matching outputs to labels for our models and supports directly training on the loss output head. In addition, it ensures input keys are copied to the labels where appropriate. It will also copy label keys into the input dict when using the dummy loss, to ensure that they are available to the model during the forward pass. 
""" # We hardcode the most common renamings; models with weirder names can set `self._label_to_output_map` arg_names = list(inspect.signature(self.call).parameters) label_kwargs = find_labels(self.__class__) label_to_output = self.get_label_to_output_name_mapping() output_to_label = {val: key for key, val in label_to_output.items()} if not self._using_dummy_loss and parse(tf.__version__) < parse("2.11.0"): # Newer versions leave this out data = expand_1d(data) x, y, sample_weight = keras.utils.unpack_x_y_sample_weight(data) # If the inputs are mutable dictionaries, make a shallow copy of them because we will modify # them during input/label pre-processing. This avoids surprising the user by wrecking their data. # In addition, modifying mutable Python inputs makes XLA compilation impossible. if isinstance(x, dict): x = x.copy() if isinstance(y, dict): y = y.copy() # When using a dummy loss, we ensure that separate labels are copied to the correct model arguments, # if those keys are not already present in the input dict if self._using_dummy_loss and y is not None: arg_names = list(inspect.signature(self.call).parameters) # If y is a tensor and the model only has one label-like input, map y to that input if len(label_kwargs) == 1 and isinstance(y, tf.Tensor): if isinstance(x, tf.Tensor): x = {arg_names[0]: x} label_kwarg = next(iter(label_kwargs)) if label_kwarg not in x: x[label_kwarg] = y # Otherwise, copy keys from y to x as long as they weren't already present in x elif isinstance(y, dict): if isinstance(x, tf.Tensor): x = {arg_names[0]: x} for key, val in y.items(): if key in arg_names and key not in x: x[key] = val elif output_to_label.get(key, None) in arg_names and key not in x: x[output_to_label[key]] = val if y is None: y = {key: val for key, val in x.items() if key in label_kwargs} if not y and not self._using_dummy_loss: raise ValueError("Could not find label column(s) in input dict and no separate labels were provided!") if isinstance(y, dict): # Rename labels at this point to match output heads y = {label_to_output.get(key, key): val for key, val in y.items()} # Run forward pass. if self._using_dummy_loss and "return_loss" in arg_names: y_pred = self(x, return_loss=True, training=False) else: y_pred = self(x, training=False) if self._using_dummy_loss: loss = self.compiled_loss(y_pred.loss, y_pred.loss, sample_weight, regularization_losses=self.losses) else: loss = None # This next block matches outputs to label keys. Tensorflow's standard method for doing this # can get very confused if any of the keys contain nested values (e.g. lists/tuples of Tensors) if isinstance(y, dict) and len(y) == 1: if list(y.keys())[0] in y_pred.keys(): y_pred = y_pred[list(y.keys())[0]] elif list(y_pred.keys())[0] == "loss": y_pred = y_pred[1] else: y_pred = y_pred[0] _, y = y.popitem() elif isinstance(y, dict): # If the labels are a dict, match keys from the output by name y_pred = {key: val for key, val in y_pred.items() if key in y} elif isinstance(y, tuple) or isinstance(y, list): # If the labels are a tuple/list, match keys to the output by order, skipping the loss. 
if list(y_pred.keys())[0] == "loss": y_pred = y_pred.to_tuple()[1:] else: y_pred = y_pred.to_tuple() y_pred = y_pred[: len(y)] # Remove unused fields in case those cause problems else: # If the labels are a single tensor, match them to the first non-loss tensor in the output if list(y_pred.keys())[0] == "loss": y_pred = y_pred[1] else: y_pred = y_pred[0] if loss is None: loss = self.compiled_loss(y, y_pred, sample_weight, regularization_losses=self.losses) self.compiled_metrics.update_state(y, y_pred, sample_weight) # Collect metrics to return return_metrics = {} for metric in self.metrics: result = metric.result() if isinstance(result, dict): return_metrics.update(result) else: return_metrics[metric.name] = result return return_metrics def create_model_card( self, output_dir, model_name: str, language: Optional[str] = None, license: Optional[str] = None, tags: Optional[str] = None, finetuned_from: Optional[str] = None, tasks: Optional[str] = None, dataset_tags: Optional[Union[str, List[str]]] = None, dataset: Optional[Union[str, List[str]]] = None, dataset_args: Optional[Union[str, List[str]]] = None, ): """ Creates a draft of a model card using the information available to the `Trainer`. Args: output_dir (`str` or `os.PathLike`): The folder in which to create the model card. model_name (`str`, *optional*): The name of the model. language (`str`, *optional*): The language of the model (if applicable) license (`str`, *optional*): The license of the model. Will default to the license of the pretrained model used, if the original model given to the `Trainer` comes from a repo on the Hub. tags (`str` or `List[str]`, *optional*): Some tags to be included in the metadata of the model card. finetuned_from (`str`, *optional*): The name of the model used to fine-tune this one (if applicable). Will default to the name of the repo of the original model given to the `Trainer` (if it comes from the Hub). tasks (`str` or `List[str]`, *optional*): One or several task identifiers, to be included in the metadata of the model card. dataset_tags (`str` or `List[str]`, *optional*): One or several dataset tags, to be included in the metadata of the model card. dataset (`str` or `List[str]`, *optional*): One or several dataset identifiers, to be included in the metadata of the model card. dataset_args (`str` or `List[str]`, *optional*): One or several dataset arguments, to be included in the metadata of the model card. """ # Avoids a circular import by doing this when necessary. from .modelcard import TrainingSummary # tests_ignore training_summary = TrainingSummary.from_keras( self, keras_history=self.history, language=language, license=license, tags=tags, model_name=model_name, finetuned_from=finetuned_from, tasks=tasks, dataset_tags=dataset_tags, dataset=dataset, dataset_args=dataset_args, ) model_card = training_summary.to_model_card() with open(os.path.join(output_dir, "README.md"), "w") as f: f.write(model_card) def set_input_embeddings(self, value): """ Set model's input embeddings Args: value (`tf.Variable`): The new weights mapping hidden states to vocabulary. 
""" main_layer = getattr(self, self.base_model_prefix) if main_layer is None: raise NotImplementedError("The model does not implements the base_model_prefix attribute.") try: main_layer.set_input_embeddings(value) except AttributeError: logger.info("Building the model") self.build_in_name_scope() main_layer.set_input_embeddings(value) def get_output_embeddings(self) -> Union[None, keras.layers.Layer]: """ Returns the model's output embeddings Returns: `tf.Variable`: The new weights mapping vocabulary to hidden states. """ if self.get_lm_head() is not None: lm_head = self.get_lm_head() try: return lm_head.get_output_embeddings() except AttributeError: logger.info("Building the model") self.build_in_name_scope() return lm_head().get_output_embeddings() return None # Overwrite for models with output embeddings def set_output_embeddings(self, value): """ Set model's output embeddings Args: value (`tf.Variable`): The new weights mapping hidden states to vocabulary. """ if self.get_lm_head() is not None: lm_head = self.get_lm_head() try: lm_head.set_output_embeddings(value) except AttributeError: logger.info("Building the model") self.build_in_name_scope() lm_head.set_output_embeddings(value) def get_output_layer_with_bias(self) -> Union[None, keras.layers.Layer]: """ Get the layer that handles a bias attribute in case the model has an LM head with weights tied to the embeddings Return: `keras.layers.Layer`: The layer that handles the bias, None if not an LM model. """ warnings.warn( "The method get_output_layer_with_bias is deprecated. Please use `get_lm_head` instead.", FutureWarning ) return self.get_lm_head() def get_prefix_bias_name(self) -> Union[None, str]: """ Get the concatenated _prefix name of the bias from the model name to the parent layer Return: `str`: The _prefix name of the bias. """ warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning) return None def get_bias(self) -> Union[None, Dict[str, tf.Variable]]: """ Dict of bias attached to an LM head. The key represents the name of the bias attribute. Return: `tf.Variable`: The weights representing the bias, None if not an LM model. """ if self.get_lm_head() is not None: lm_head = self.get_lm_head() try: return lm_head.get_bias() except AttributeError: self.build_in_name_scope() return lm_head.get_bias() return None def set_bias(self, value): """ Set all the bias in the LM head. Args: value (`Dict[tf.Variable]`): All the new bias attached to an LM head. """ if self.get_lm_head() is not None: lm_head = self.get_lm_head() try: lm_head.set_bias(value) except AttributeError: self.build_in_name_scope() lm_head.set_bias(value) def get_lm_head(self) -> keras.layers.Layer: """ The LM Head layer. This method must be overwritten by all the models that have a lm head. Return: `keras.layers.Layer`: The LM head layer if the model has one, None if not. """ return None def resize_token_embeddings( self, new_num_tokens: Optional[int] = None ) -> Union[keras.layers.Embedding, tf.Variable]: """ Resizes input token embeddings matrix of the model if `new_num_tokens != config.vocab_size`. Takes care of tying weights embeddings afterwards if the model class has a `tie_weights()` method. Arguments: new_num_tokens (`int`, *optional*): The number of new tokens in the embedding matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. If not provided or `None`, just returns a pointer to the input tokens without doing anything. 
Return: `tf.Variable` or `keras.layers.Embedding`: Pointer to the input tokens of the model. """ # TODO (joao): flagged for replacement (by `_v2_resized_token_embeddings`) due to embeddings refactor # Run the new code path if the model has a keras embeddings layer if isinstance(self.get_input_embeddings(), keras.layers.Embedding): return self._v2_resized_token_embeddings(new_num_tokens) if new_num_tokens is None or new_num_tokens == self.config.vocab_size: return self._get_word_embedding_weight(self.get_input_embeddings()) model_embeds = self._resize_token_embeddings(new_num_tokens) # Update base model and current model config self.config.vocab_size = new_num_tokens return model_embeds def _v2_resized_token_embeddings(self, new_num_tokens: Optional[int] = None) -> keras.layers.Embedding: """ Resizes input token embeddings matrix of the model if `new_num_tokens != config.vocab_size`. Arguments: new_num_tokens (`int`, *optional*): The number of new tokens in the embedding matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. If not provided or `None`, just returns a pointer to the input tokens without doing anything. Return: `keras.layers.Embedding`: Pointer to the input tokens of the model. """ if new_num_tokens is None or new_num_tokens == self.config.vocab_size: return self.get_input_embeddings() model_embeds = self._v2_resize_token_embeddings(new_num_tokens) # Update base model and current model config self.config.vocab_size = new_num_tokens return model_embeds def _get_word_embedding_weight(model, embedding_layer): # TODO (joao): flagged for delection due to embeddings refactor # If the variable holds the weights themselves, return them if isinstance(embedding_layer, tf.Tensor): return embedding_layer # Otherwise, try to get them from the layer's attributes embeds = getattr(embedding_layer, "weight", None) if embeds is not None: return embeds embeds = getattr(embedding_layer, "decoder", None) if embeds is not None: return embeds # The reason why the attributes don't exist might be # because the model is not built, so retry getting # the argument after building the model model.build_in_name_scope() embeds = getattr(embedding_layer, "weight", None) if embeds is not None: return embeds embeds = getattr(embedding_layer, "decoder", None) if embeds is not None: return embeds return None def _resize_token_embeddings(self, new_num_tokens): # TODO (joao): flagged for replacement (by `_v2_resize_token_embeddings`) due to embeddings refactor old_embeddings = self._get_word_embedding_weight(self.get_input_embeddings()) new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens) # if word embeddings are not tied, make sure that lm head bias is resized as well if self.get_bias() is not None: old_lm_head_bias = self.get_bias() new_lm_head_bias = self._get_resized_lm_head_bias(old_lm_head_bias, new_num_tokens) self.set_bias(new_lm_head_bias) # if word embeddings are not tied, make sure that lm head decoder is resized as well if self.get_output_embeddings() is not None: old_lm_head_decoder = self._get_word_embedding_weight(self.get_output_embeddings()) new_lm_head_decoder = self._get_resized_lm_head_decoder(old_lm_head_decoder, new_num_tokens) self.set_output_embeddings(new_lm_head_decoder) self.set_input_embeddings(new_embeddings) return self.get_input_embeddings() def _v2_resize_token_embeddings(self, new_num_tokens): old_embeddings = self.get_input_embeddings() new_embeddings = 
self._v2_get_resized_embeddings(old_embeddings, new_num_tokens) self.set_input_embeddings(new_embeddings) # If word embeddings are not tied, make sure that lm head bias is resized as well if self.get_bias() is not None: old_lm_head_bias = self.get_bias() new_lm_head_bias = self._v2_get_resized_lm_head_bias(old_lm_head_bias, new_num_tokens) self.set_bias(new_lm_head_bias) # If word embeddings are not tied, make sure that lm head decoder is resized as well. tied_weights = self.get_input_embeddings() == self.get_output_embeddings() if self.get_output_embeddings() is not None and not tied_weights: old_lm_head_decoder = self._get_word_embedding_weight(self.get_output_embeddings()) # TODO (joao): this one probably needs a v2 version with other models new_lm_head_decoder = self._get_resized_lm_head_decoder(old_lm_head_decoder, new_num_tokens) self.set_output_embeddings(new_lm_head_decoder) return self.get_input_embeddings() def _get_resized_lm_head_bias(self, old_lm_head_bias, new_num_tokens): """ Build a resized bias from the old ones. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end Args: old_lm_head_bias (`tf.Variable`): Old lm head bias to be resized. new_num_tokens (`int`, *optional*): New number of tokens in the linear matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. If not provided or `None`, just returns None Return: `tf.Variable`: Pointer to the resized bias. """ # TODO (joao): flagged for replacement (by `_v2_get_resized_lm_head_bias`) due to embeddings refactor new_lm_head_bias = {} for attr, weight in old_lm_head_bias.items(): first_dim, old_num_tokens = (None, shape_list(weight)[0]) if tf.rank(weight) == 1 else shape_list(weight) size_diff = new_num_tokens - old_num_tokens final_shape = [new_num_tokens] if first_dim is None else [first_dim, new_num_tokens] # initialize new bias if tf.math.greater(size_diff, 0): padding_shape = [[0, size_diff]] if first_dim is None else [[0, 0], [0, size_diff]] current_bias = tf.pad(weight.value(), tf.convert_to_tensor(padding_shape), constant_values=-1) num_tokens_to_copy = min(old_num_tokens, new_num_tokens) mask_shape = [num_tokens_to_copy] if first_dim is None else [1, num_tokens_to_copy] bias_mask = tf.fill(tf.convert_to_tensor(mask_shape), True) bias_mask = tf.pad(bias_mask, tf.convert_to_tensor(padding_shape), constant_values=False) else: slice_from = [0] if first_dim is None else [0, 0] current_bias = tf.slice( weight.value(), tf.convert_to_tensor(slice_from), tf.convert_to_tensor(final_shape) ) bias_mask = tf.fill(tf.convert_to_tensor(final_shape), True) new_bias = self.add_weight( shape=final_shape, initializer="zeros", trainable=True, name=weight.name.split(":")[0], ) init_bias = tf.where(bias_mask, current_bias, new_bias.value()) new_bias.assign(init_bias) new_lm_head_bias[attr] = new_bias return new_lm_head_bias def _v2_get_resized_lm_head_bias( self, old_lm_head_bias: Dict[str, tf.Variable], new_num_tokens: int ) -> Dict[str, tf.Tensor]: """ Build a resized bias from the old ones. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end Args: old_lm_head_bias (`Dict[str, tf.Variable]`): Old lm head bias to be resized. new_num_tokens (`int`): New number of tokens in the linear matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. 
Return: `tf.Tensor`: Values for the resized bias. """ new_lm_head_bias = {} for attr, weight in old_lm_head_bias.items(): # Determine the size difference (depending on the shape) first_dim, old_num_tokens = (None, shape_list(weight)[0]) if tf.rank(weight) == 1 else shape_list(weight) size_diff = new_num_tokens - old_num_tokens # Copy the old bias values to the new bias if old_num_tokens > new_num_tokens: new_bias = weight.value()[..., :new_num_tokens] else: padding_shape = [[0, size_diff]] if first_dim is None else [[0, 0], [0, size_diff]] new_bias = tf.pad(weight.value(), tf.convert_to_tensor(padding_shape)) new_lm_head_bias[attr] = new_bias return new_lm_head_bias def _get_resized_lm_head_decoder(self, old_lm_head_decoder, new_num_tokens): """ Build a resized decoder from the old ones. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end Args: old_lm_head_decoder (`tf.Variable`): Old lm head decoder to be resized. new_num_tokens (`int`, *optional*): New number of tokens in the linear matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. If not provided or `None`, just returns None Return: `tf.Variable`: Pointer to the resized decoder or None if the output embeddings are different from the input ones. """ new_lm_head_decoder = old_lm_head_decoder is_input_output_equals = tf.reduce_any( self._get_word_embedding_weight(self.get_input_embeddings()) == old_lm_head_decoder ) if old_lm_head_decoder is not None and not is_input_output_equals: old_embedding_dim = shape_list(old_lm_head_decoder)[1] decoder_mask, current_decoder = init_copy_embeddings(old_lm_head_decoder, new_num_tokens) new_lm_head_decoder = self.add_weight( shape=(new_num_tokens, old_embedding_dim), initializer="zeros", trainable=True, name=old_lm_head_decoder.name.split(":")[0], ) init_decoder = tf.where(decoder_mask, current_decoder, new_lm_head_decoder.value()) new_lm_head_decoder.assign(init_decoder) return new_lm_head_decoder def _get_resized_embeddings(self, old_embeddings, new_num_tokens=None) -> tf.Variable: """ Build a resized Embedding weights from a provided token Embedding weights. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end Args: old_embeddings (`tf.Variable`): Old embeddings to be resized. new_num_tokens (`int`, *optional*): New number of tokens in the embedding matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. If not provided or `None`, just returns a pointer to the input tokens `tf.Variable` module of the model without doing anything. 
Return: `tf.Variable`: Pointer to the resized Embedding Module or the old Embedding Module if `new_num_tokens` is `None` """ # TODO (joao): flagged for replacement (by `_v2_get_resized_embeddings`) due to embeddings refactor old_embedding_dim = shape_list(old_embeddings)[1] init_range = getattr(self.config, "initializer_range", 0.02) embeddings_mask, current_embeddings = init_copy_embeddings(old_embeddings, new_num_tokens) new_embeddings = self.add_weight( name=old_embeddings.name.split(":")[0], shape=[new_num_tokens, old_embedding_dim], initializer=get_initializer(init_range), dtype=tf.float32, ) init_embeddings = tf.where(embeddings_mask, current_embeddings, new_embeddings.value()) new_embeddings.assign(init_embeddings) return new_embeddings def _v2_get_resized_embeddings( self, old_embeddings: keras.layers.Embedding, new_num_tokens: int ) -> keras.layers.Embedding: """ Build a resized Embedding layer from a provided Embedding layer. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. Args: old_embeddings (`keras.layers.Embedding`): Old embeddings to be resized. new_num_tokens (`int`, *optional*): New number of tokens in the embedding matrix. Return: `keras.layers.Embedding`: Resized Embedding layer. """ # Get the initialization range for the embeddings init_range = 0.02 # default value potential_initialization_variable_names = [ "initializer_range", # most common "initializer_factor", # e.g. T5 "init_std", # e.g BART ] for var_name in potential_initialization_variable_names: if hasattr(self.config, var_name): init_range = getattr(self.config, var_name) # Get a new (initialized) embeddings layer new_embeddings = keras.layers.Embedding( input_dim=new_num_tokens, output_dim=old_embeddings.output_dim, embeddings_initializer=keras.initializers.TruncatedNormal(stddev=init_range), name=old_embeddings.embeddings.name[:-13], # exact same scoped name except "/embeddings:0" ) new_embeddings(tf.constant([[0]])) # Copy the old embeddings to the new embeddings if old_embeddings.input_dim >= new_num_tokens: init_embeddings = old_embeddings.embeddings[:new_num_tokens] else: init_embeddings = tf.concat( [old_embeddings.embeddings, new_embeddings.embeddings[old_embeddings.input_dim :]], axis=0 ) new_embeddings.embeddings.assign(init_embeddings) return new_embeddings def prune_heads(self, heads_to_prune): """ Prunes heads of the base model. Arguments: heads_to_prune (`Dict[int, List[int]]`): Dictionary with keys being selected layer indices (`int`) and associated values being the list of heads to prune in said layer (list of `int`). For instance {1: [0, 2], 2: [2, 3]} will prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2. """ raise NotImplementedError def save_pretrained( self, save_directory, saved_model=False, version=1, push_to_hub=False, signatures=None, max_shard_size: Union[int, str] = "5GB", create_pr: bool = False, safe_serialization: bool = False, token: Optional[Union[str, bool]] = None, **kwargs, ): """ Save a model and its configuration file to a directory, so that it can be re-loaded using the [`~TFPreTrainedModel.from_pretrained`] class method. Arguments: save_directory (`str`): Directory to which to save. Will be created if it doesn't exist. saved_model (`bool`, *optional*, defaults to `False`): If the model has to be saved in saved model format as well or not. version (`int`, *optional*, defaults to 1): The version of the saved model. 
A saved model needs to be versioned in order to be properly loaded by TensorFlow Serving as detailed in the official documentation https://www.tensorflow.org/tfx/serving/serving_basic push_to_hub (`bool`, *optional*, defaults to `False`): Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the repository you want to push to with `repo_id` (will default to the name of `save_directory` in your namespace). signatures (`dict` or `tf.function`, *optional*): Model's signature used for serving. This will be passed to the `signatures` argument of model.save(). max_shard_size (`int` or `str`, *optional*, defaults to `"10GB"`): The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size lower than this size. If expressed as a string, needs to be digits followed by a unit (like `"5MB"`). <Tip warning={true}> If a single weight of the model is bigger than `max_shard_size`, it will be in its own checkpoint shard which will be bigger than `max_shard_size`. </Tip> create_pr (`bool`, *optional*, defaults to `False`): Whether or not to create a PR with the uploaded files or directly commit. safe_serialization (`bool`, *optional*, defaults to `False`): Whether to save the model using `safetensors` or the traditional TensorFlow way (that uses `h5`). token (`str` or `bool`, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use the token generated when running `huggingface-cli login` (stored in `~/.huggingface`). kwargs (`Dict[str, Any]`, *optional*): Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method. """ use_auth_token = kwargs.pop("use_auth_token", None) if use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", FutureWarning, ) if token is not None: raise ValueError( "`token` and `use_auth_token` are both specified. Please set only the argument `token`." ) token = use_auth_token if token is not None: kwargs["token"] = token if os.path.isfile(save_directory): logger.error(f"Provided path ({save_directory}) should be a directory, not a file") return os.makedirs(save_directory, exist_ok=True) if push_to_hub: commit_message = kwargs.pop("commit_message", None) repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1]) repo_id = self._create_repo(repo_id, **kwargs) files_timestamps = self._get_files_timestamps(save_directory) if saved_model: # If `torch_dtype` is in the config with a torch dtype class as the value, we need to change it to string. # (Although TF doesn't care about this attribute, we can't just remove it or set it to `None`.) 
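            # (e.g. `str(torch.float32)` gives "torch.float32", so splitting on "." and keeping the second part
            # stores just "float32" in the config.)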
if getattr(self.config, "torch_dtype", None) is not None and not isinstance(self.config.torch_dtype, str): self.config.torch_dtype = str(self.config.torch_dtype).split(".")[1] if signatures is None: serving_default = self.serving.get_concrete_function(self.input_signature) if any(spec.dtype == tf.int32 for spec in self.input_signature.values()): int64_spec = { key: tf.TensorSpec( shape=spec.shape, dtype=tf.int64 if spec.dtype == tf.int32 else spec.dtype, name=spec.name ) for key, spec in self.input_signature.items() } int64_serving = self.serving.get_concrete_function(int64_spec) signatures = {"serving_default": serving_default, "int64_serving": int64_serving} else: signatures = serving_default saved_model_dir = os.path.join(save_directory, "saved_model", str(version)) self.save(saved_model_dir, include_optimizer=False, signatures=signatures) logger.info(f"Saved model created in {saved_model_dir}") # Save configuration file self.config.architectures = [self.__class__.__name__[2:]] # If we have a custom model, we copy the file defining it in the folder and set the attributes so it can be # loaded from the Hub. if self._auto_class is not None: custom_object_save(self, save_directory, config=self.config) self.config.save_pretrained(save_directory) if self.can_generate(): self.generation_config.save_pretrained(save_directory) # If we save using the predefined names, we can load using `from_pretrained` weights_name = SAFE_WEIGHTS_NAME if safe_serialization else TF2_WEIGHTS_NAME output_model_file = os.path.join(save_directory, weights_name) shards, index = tf_shard_checkpoint(self.weights, max_shard_size, weights_name=weights_name) # Clean the folder from a previous save for filename in os.listdir(save_directory): full_filename = os.path.join(save_directory, filename) # If we have a shard file that is not going to be replaced, we delete it, but only from the main process # in distributed settings to avoid race conditions. weights_no_suffix = weights_name.replace(".bin", "").replace(".safetensors", "") if ( filename.startswith(weights_no_suffix) and os.path.isfile(full_filename) and filename not in shards.keys() ): os.remove(full_filename) if index is None: if safe_serialization: state_dict = {strip_model_name_and_prefix(w.name): w.value() for w in self.weights} safe_save_file(state_dict, output_model_file, metadata={"format": "tf"}) else: self.save_weights(output_model_file) logger.info(f"Model weights saved in {output_model_file}") else: save_index_file = SAFE_WEIGHTS_INDEX_NAME if safe_serialization else TF2_WEIGHTS_INDEX_NAME save_index_file = os.path.join(save_directory, save_index_file) # Save the index as well with open(save_index_file, "w", encoding="utf-8") as index_file: content = json.dumps(index, indent=2, sort_keys=True) + "\n" index_file.write(content) logger.info( f"The model is bigger than the maximum size per checkpoint ({max_shard_size}) and is going to be " f"split in {len(shards)} checkpoint shards. You can find where each parameters has been saved in the " f"index located at {save_index_file}." ) for shard_file, shard in shards.items(): if safe_serialization: shard_state_dict = {strip_model_name_and_prefix(w.name): w.value() for w in shard} safe_save_file( shard_state_dict, os.path.join(save_directory, shard_file), metadata={"format": "tf"} ) else: with h5py.File(os.path.join(save_directory, shard_file), mode="w") as shard_file: layers = [] for layer in sorted(shard, key=lambda x: x.name): if "model." 
in layer.name or len(layer.name.split("/")) == 1: layer_name = layer.name else: layer_name = "/".join(layer.name.split("/")[1:]) param_dset = shard_file.create_dataset( layer_name, layer.numpy().shape, dtype=layer.numpy().dtype ) param_dset[:] = layer.numpy() layers.append(layer_name.encode("utf8")) save_attributes_to_hdf5_group(shard_file, "layer_names", layers) if push_to_hub: self._upload_modified_files( save_directory, repo_id, files_timestamps, commit_message=commit_message, token=token, ) @classmethod def from_pretrained( cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *model_args, config: Optional[Union[PretrainedConfig, str, os.PathLike]] = None, cache_dir: Optional[Union[str, os.PathLike]] = None, ignore_mismatched_sizes: bool = False, force_download: bool = False, local_files_only: bool = False, token: Optional[Union[str, bool]] = None, revision: str = "main", use_safetensors: bool = None, **kwargs, ): r""" Instantiate a pretrained TF 2.0 model from a pre-trained model configuration. The warning *Weights from XXX not initialized from pretrained model* means that the weights of XXX do not come pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning task. The warning *Weights from XXX not used in YYY* means that the layer XXX is not used by YYY, therefore those weights are discarded. Parameters: pretrained_model_name_or_path (`str`, *optional*): Can be either: - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. - A path to a *directory* containing model weights saved using [`~TFPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`. - A path or url to a *PyTorch state_dict save file* (e.g, `./pt_model/pytorch_model.bin`). In this case, `from_pt` should be set to `True` and a configuration object should be provided as `config` argument. This loading path is slower than converting the PyTorch model in a TensorFlow model using the provided conversion scripts and loading the TensorFlow model afterwards. - `None` if you are both providing the configuration and state dictionary (resp. with keyword arguments `config` and `state_dict`). model_args (sequence of positional arguments, *optional*): All remaining positional arguments will be passed to the underlying model's `__init__` method. config (`Union[PretrainedConfig, str]`, *optional*): Can be either: - an instance of a class derived from [`PretrainedConfig`], - a string valid as input to [`~PretrainedConfig.from_pretrained`]. Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when: - The model is a model provided by the library (loaded with the *model id* string of a pretrained model). - The model was saved using [`~TFPreTrainedModel.save_pretrained`] and is reloaded by supplying the save directory. - The model is loaded by supplying a local directory as `pretrained_model_name_or_path` and a configuration JSON file named *config.json* is found in the directory. from_pt (`bool`, *optional*, defaults to `False`): Load the model weights from a PyTorch state_dict save file (see docstring of `pretrained_model_name_or_path` argument). ignore_mismatched_sizes (`bool`, *optional*, defaults to `False`): Whether or not to raise an error if some of the weights from the checkpoint do not have the same size as the weights of the model (if for instance, you are instantiating a model with 10 labels from a checkpoint with 3 labels). 
cache_dir (`str`, *optional*): Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. resume_download: Deprecated and ignored. All downloads are now resumed by default when possible. Will be removed in v5 of Transformers. proxies: (`Dict[str, str], `optional`): A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. output_loading_info(`bool`, *optional*, defaults to `False`): Whether ot not to also return a dictionary containing missing keys, unexpected keys and error messages. local_files_only(`bool`, *optional*, defaults to `False`): Whether or not to only look at local files (e.g., not try downloading the model). token (`str` or `bool`, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use the token generated when running `huggingface-cli login` (stored in `~/.huggingface`). revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. <Tip> To test a pull request you made on the Hub, you can pass `revision="refs/pr/<pr_number>"`. </Tip> mirror (`str`, *optional*): Mirror source to accelerate downloads in China. If you are from China and have an accessibility problem, you can set this option to resolve it. Note that we do not guarantee the timeliness or safety. Please refer to the mirror site for more information. subfolder (`str`, *optional*, defaults to `""`): In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can specify the folder name here. tf_to_pt_weight_rename (`Callable`, *optional*): A function that is called to transform the names of weights during the PyTorch to TensorFlow crossloading process. This is not necessary for most models, but is useful to allow composite models to be crossloaded correctly. use_safetensors (`bool`, *optional*, defaults to `None`): Whether or not to use `safetensors` checkpoints. Defaults to `None`. If not specified and `safetensors` is not installed, it will be set to `False`. kwargs (remaining dictionary of keyword arguments, *optional*): Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., `output_attentions=True`). Behaves differently depending on whether a `config` is provided or automatically loaded: - If a configuration is provided with `config`, `**kwargs` will be directly passed to the underlying model's `__init__` method (we assume all relevant updates to the configuration have already been done) - If a configuration is not provided, `kwargs` will be first passed to the configuration class initialization function ([`~PretrainedConfig.from_pretrained`]). Each key of `kwargs` that corresponds to a configuration attribute will be used to override said attribute with the supplied `kwargs` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's `__init__` function. 
Examples: ```python >>> from transformers import BertConfig, TFBertModel >>> # Download model and configuration from huggingface.co and cache. >>> model = TFBertModel.from_pretrained("google-bert/bert-base-uncased") >>> # Model was saved using *save_pretrained('./test/saved_model/')* (for example purposes, not runnable). >>> model = TFBertModel.from_pretrained("./test/saved_model/") >>> # Update configuration during loading. >>> model = TFBertModel.from_pretrained("google-bert/bert-base-uncased", output_attentions=True) >>> assert model.config.output_attentions == True >>> # Loading from a Pytorch model file instead of a TensorFlow checkpoint (slower, for example purposes, not runnable). >>> config = BertConfig.from_json_file("./pt_model/my_pt_model_config.json") >>> model = TFBertModel.from_pretrained("./pt_model/my_pytorch_model.bin", from_pt=True, config=config) ```""" from_pt = kwargs.pop("from_pt", False) resume_download = kwargs.pop("resume_download", None) proxies = kwargs.pop("proxies", None) output_loading_info = kwargs.pop("output_loading_info", False) use_auth_token = kwargs.pop("use_auth_token", None) trust_remote_code = kwargs.pop("trust_remote_code", None) _ = kwargs.pop("mirror", None) load_weight_prefix = kwargs.pop("load_weight_prefix", None) from_pipeline = kwargs.pop("_from_pipeline", None) from_auto_class = kwargs.pop("_from_auto", False) subfolder = kwargs.pop("subfolder", "") commit_hash = kwargs.pop("_commit_hash", None) tf_to_pt_weight_rename = kwargs.pop("tf_to_pt_weight_rename", None) # Not relevant for TF models _ = kwargs.pop("adapter_kwargs", None) if use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", FutureWarning, ) if token is not None: raise ValueError( "`token` and `use_auth_token` are both specified. Please set only the argument `token`." ) token = use_auth_token if trust_remote_code is True: logger.warning( "The argument `trust_remote_code` is to be used with Auto classes. It has no effect here and is" " ignored." ) user_agent = {"file_type": "model", "framework": "tensorflow", "from_auto_class": from_auto_class} if from_pipeline is not None: user_agent["using_pipeline"] = from_pipeline if is_offline_mode() and not local_files_only: logger.info("Offline mode: forcing local_files_only=True") local_files_only = True if use_safetensors is None and not is_safetensors_available(): use_safetensors = False # Load config if we don't provide a configuration if not isinstance(config, PretrainedConfig): config_path = config if config is not None else pretrained_model_name_or_path config, model_kwargs = cls.config_class.from_pretrained( config_path, cache_dir=cache_dir, return_unused_kwargs=True, force_download=force_download, resume_download=resume_download, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision, _from_auto=from_auto_class, _from_pipeline=from_pipeline, _commit_hash=commit_hash, **kwargs, ) else: model_kwargs = kwargs if commit_hash is None: commit_hash = getattr(config, "_commit_hash", None) # This variable will flag if we're loading a sharded checkpoint. In this case the archive file is just the # index of the files. 
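        # (Illustrative sketch of such an index: `tf_model.h5.index.json` is a JSON file whose "weight_map" maps each
        # weight name to the shard file that stores it, e.g.
        # {"weight_map": {"tf_bert_model/bert/embeddings/word_embeddings/weight:0": "tf_model-00001-of-00002.h5"}};
        # the exact weight names shown here are assumptions.)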
is_sharded = False # Load model if pretrained_model_name_or_path is not None: pretrained_model_name_or_path = str(pretrained_model_name_or_path) is_local = os.path.isdir(pretrained_model_name_or_path) if is_local: if from_pt and os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)): # Load from a PyTorch checkpoint in priority if from_pt archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME) elif from_pt and os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_INDEX_NAME)): # Load from a sharded PyTorch checkpoint archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_INDEX_NAME) is_sharded = True elif use_safetensors is not False and os.path.isfile( os.path.join(pretrained_model_name_or_path, SAFE_WEIGHTS_NAME) ): # Load from a safetensors checkpoint archive_file = os.path.join(pretrained_model_name_or_path, SAFE_WEIGHTS_NAME) elif use_safetensors is not False and os.path.isfile( os.path.join(pretrained_model_name_or_path, SAFE_WEIGHTS_INDEX_NAME) ): # Load from a sharded safetensors checkpoint archive_file = os.path.join(pretrained_model_name_or_path, SAFE_WEIGHTS_INDEX_NAME) is_sharded = True elif os.path.isfile(os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)): # Load from a TF 2.0 checkpoint archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME) elif os.path.isfile(os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_INDEX_NAME)): # Load from a sharded TF 2.0 checkpoint archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_INDEX_NAME) is_sharded = True # At this stage we don't have a weight file so we will raise an error. elif use_safetensors: raise EnvironmentError( f"Error no file named {SAFE_WEIGHTS_NAME} or {SAFE_WEIGHTS_INDEX_NAME} found in directory {pretrained_model_name_or_path}. " f"Please make sure that the model has been saved with `safe_serialization=True` or do not " f"set `use_safetensors=True`." ) elif os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)) or os.path.isfile( os.path.join(pretrained_model_name_or_path, WEIGHTS_INDEX_NAME) ): raise EnvironmentError( f"Error no file named {TF2_WEIGHTS_NAME} or {SAFE_WEIGHTS_NAME} found in directory {pretrained_model_name_or_path} " "but there is a file for PyTorch weights. Use `from_pt=True` to load this model from those " "weights." ) else: raise EnvironmentError( f"Error no file named {TF2_WEIGHTS_NAME}, {SAFE_WEIGHTS_NAME} or {WEIGHTS_NAME} found in directory " f"{pretrained_model_name_or_path}." 
) elif os.path.isfile(pretrained_model_name_or_path): archive_file = pretrained_model_name_or_path is_local = True elif os.path.isfile(pretrained_model_name_or_path + ".index"): archive_file = pretrained_model_name_or_path + ".index" is_local = True elif is_remote_url(pretrained_model_name_or_path): filename = pretrained_model_name_or_path resolved_archive_file = download_url(pretrained_model_name_or_path) else: # set correct filename if from_pt: filename = WEIGHTS_NAME elif use_safetensors is not False: filename = SAFE_WEIGHTS_NAME else: filename = TF2_WEIGHTS_NAME try: # Load from URL or cache if already cached cached_file_kwargs = { "cache_dir": cache_dir, "force_download": force_download, "proxies": proxies, "resume_download": resume_download, "local_files_only": local_files_only, "token": token, "user_agent": user_agent, "revision": revision, "subfolder": subfolder, "_raise_exceptions_for_gated_repo": False, "_raise_exceptions_for_missing_entries": False, "_commit_hash": commit_hash, } resolved_archive_file = cached_file(pretrained_model_name_or_path, filename, **cached_file_kwargs) # Since we set _raise_exceptions_for_missing_entries=False, we don't get an exception but a None # result when internet is up, the repo and revision exist, but the file does not. if resolved_archive_file is None and filename == SAFE_WEIGHTS_NAME: # Did not find the safetensors file, let's fallback to TF. # No support for sharded safetensors yet, so we'll raise an error if that's all we find. filename = TF2_WEIGHTS_NAME resolved_archive_file = cached_file( pretrained_model_name_or_path, TF2_WEIGHTS_NAME, **cached_file_kwargs ) if resolved_archive_file is None and filename == TF2_WEIGHTS_NAME: # Maybe the checkpoint is sharded, we try to grab the index name in this case. resolved_archive_file = cached_file( pretrained_model_name_or_path, TF2_WEIGHTS_INDEX_NAME, **cached_file_kwargs ) if resolved_archive_file is not None: is_sharded = True if resolved_archive_file is None and filename == WEIGHTS_NAME: # Maybe the checkpoint is sharded, we try to grab the index name in this case. resolved_archive_file = cached_file( pretrained_model_name_or_path, WEIGHTS_INDEX_NAME, **cached_file_kwargs ) if resolved_archive_file is not None: is_sharded = True if resolved_archive_file is None: # Otherwise, maybe there is a PyTorch or Flax model file. We try those to give a helpful error # message. has_file_kwargs = { "revision": revision, "proxies": proxies, "token": token, "cache_dir": cache_dir, "local_files_only": local_files_only, } if has_file(pretrained_model_name_or_path, SAFE_WEIGHTS_INDEX_NAME, **has_file_kwargs): is_sharded = True elif has_file(pretrained_model_name_or_path, WEIGHTS_NAME, **has_file_kwargs): raise EnvironmentError( f"{pretrained_model_name_or_path} does not appear to have a file named" f" {TF2_WEIGHTS_NAME} but there is a file for PyTorch weights. Use `from_pt=True` to" " load this model from those weights." ) else: raise EnvironmentError( f"{pretrained_model_name_or_path} does not appear to have a file named {WEIGHTS_NAME}," f" {TF2_WEIGHTS_NAME} or {TF_WEIGHTS_NAME}" ) except EnvironmentError: # Raise any environment error raise by `cached_file`. It will have a helpful error message adapted # to the original exception. raise except Exception: # For any other exception, we throw a generic error. raise EnvironmentError( f"Can't load the model for '{pretrained_model_name_or_path}'. 
If you were trying to load it" " from 'https://huggingface.co/models', make sure you don't have a local directory with the" f" same name. Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a" f" directory containing a file named {WEIGHTS_NAME}, {TF2_WEIGHTS_NAME} or {TF_WEIGHTS_NAME}" ) if is_local: logger.info(f"loading weights file {archive_file}") resolved_archive_file = archive_file filename = resolved_archive_file.split(os.path.sep)[-1] else: logger.info(f"loading weights file {filename} from cache at {resolved_archive_file}") else: resolved_archive_file = None # We'll need to download and cache each checkpoint shard if the checkpoint is sharded. if is_sharded: # resolved_archive_file becomes a list of files that point to the different checkpoint shards in this case. resolved_archive_file, sharded_metadata = get_checkpoint_shard_files( pretrained_model_name_or_path, resolved_archive_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, token=token, user_agent=user_agent, revision=revision, _commit_hash=commit_hash, ) safetensors_from_pt = False if filename == SAFE_WEIGHTS_NAME: with safe_open(resolved_archive_file, framework="tf") as f: safetensors_metadata = f.metadata() if safetensors_metadata is None or safetensors_metadata.get("format") not in ["pt", "tf", "flax", "mlx"]: raise OSError( f"The safetensors archive passed at {resolved_archive_file} does not contain the valid metadata." " Make sure you save your model with the `save_pretrained` method." ) safetensors_from_pt = safetensors_metadata.get("format") == "pt" elif filename == SAFE_WEIGHTS_INDEX_NAME: with safe_open(resolved_archive_file[0], framework="tf") as f: safetensors_metadata = f.metadata() if safetensors_metadata is None or safetensors_metadata.get("format") not in ["pt", "tf", "flax", "mlx"]: raise OSError( f"The safetensors archive passed at {resolved_archive_file} does not contain the valid metadata." " Make sure you save your model with the `save_pretrained` method." ) safetensors_from_pt = safetensors_metadata.get("format") == "pt" config.name_or_path = pretrained_model_name_or_path # composed models, *e.g.* TFRag, require special treatment when it comes to loading # pre-trained weights. if cls._requires_load_weight_prefix and model_kwargs.get("name") is not None: model_kwargs["load_weight_prefix"] = load_weight_prefix + "/" + model_kwargs.get("name") # Instantiate model. model = cls(config, *model_args, **model_kwargs) if tf_to_pt_weight_rename is None and hasattr(model, "tf_to_pt_weight_rename"): # TODO Matt: This is a temporary workaround to allow weight renaming, but requires a method # to be defined for each class that requires a rename. 
We can probably just have a class-level # dict and a single top-level method or something and cut down a lot of boilerplate code tf_to_pt_weight_rename = model.tf_to_pt_weight_rename if from_pt: from .modeling_tf_pytorch_utils import load_pytorch_checkpoint_in_tf2_model # Load from a PyTorch checkpoint return load_pytorch_checkpoint_in_tf2_model( model, resolved_archive_file, allow_missing_keys=True, output_loading_info=output_loading_info, _prefix=load_weight_prefix, tf_to_pt_weight_rename=tf_to_pt_weight_rename, ) # we might need to extend the variable scope for composite models if load_weight_prefix is not None: with tf.compat.v1.variable_scope(load_weight_prefix): model.build_in_name_scope() # build the network with dummy inputs else: model.build_in_name_scope() # build the network with dummy inputs if safetensors_from_pt and not is_sharded: from .modeling_tf_pytorch_utils import load_pytorch_state_dict_in_tf2_model with safe_open(resolved_archive_file, framework="tf") as safetensors_archive: # Load from a PyTorch safetensors checkpoint # We load in TF format here because PT weights often need to be transposed, and this is much # faster on GPU. Loading as numpy and transposing on CPU adds several seconds to load times. return load_pytorch_state_dict_in_tf2_model( model, safetensors_archive, tf_inputs=False, # No need to build the model again allow_missing_keys=True, output_loading_info=output_loading_info, _prefix=load_weight_prefix, ignore_mismatched_sizes=ignore_mismatched_sizes, tf_to_pt_weight_rename=tf_to_pt_weight_rename, ) elif safetensors_from_pt: from .modeling_tf_pytorch_utils import load_sharded_pytorch_safetensors_in_tf2_model return load_sharded_pytorch_safetensors_in_tf2_model( model, resolved_archive_file, tf_inputs=False, allow_missing_keys=True, output_loading_info=output_loading_info, _prefix=load_weight_prefix, ignore_mismatched_sizes=ignore_mismatched_sizes, tf_to_pt_weight_rename=tf_to_pt_weight_rename, ) # 'by_name' allow us to do transfer learning by skipping/adding layers # see https://github.com/tensorflow/tensorflow/blob/00fad90125b18b80fe054de1055770cfb8fe4ba3/tensorflow/python/keras/engine/network.py#L1339-L1357 try: if is_sharded: for file in resolved_archive_file: os.path.isfile(file), f"Error retrieving files {file}" if filename == SAFE_WEIGHTS_INDEX_NAME: missing_keys, unexpected_keys, mismatched_keys = load_tf_sharded_weights_from_safetensors( model, resolved_archive_file, ignore_mismatched_sizes=ignore_mismatched_sizes, _prefix=load_weight_prefix, ) else: missing_keys, unexpected_keys, mismatched_keys = load_tf_sharded_weights( model, resolved_archive_file, ignore_mismatched_sizes=ignore_mismatched_sizes, _prefix=load_weight_prefix, ) else: # Handles both H5 and safetensors missing_keys, unexpected_keys, mismatched_keys = load_tf_weights( model, resolved_archive_file, ignore_mismatched_sizes=ignore_mismatched_sizes, _prefix=load_weight_prefix, ) except OSError as e: try: with open(resolved_archive_file) as f: if f.read().startswith("version"): raise OSError( "You seem to have cloned a repository without having git-lfs installed. Please install " "git-lfs and run `git lfs install` followed by `git lfs pull` in the folder " "you cloned." ) else: raise ValueError from e except (UnicodeDecodeError, ValueError): raise OSError( "Unable to load weights from h5 file. " "If you tried to load a TF 2.0 model from a PyTorch checkpoint, please set from_pt=True. 
" ) if cls._keys_to_ignore_on_load_missing is not None: for pat in cls._keys_to_ignore_on_load_missing: missing_keys = [k for k in missing_keys if re.search(pat, k) is None] if cls._keys_to_ignore_on_load_unexpected is not None: for pat in cls._keys_to_ignore_on_load_unexpected: unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None] if len(unexpected_keys) > 0: logger.warning( f"Some layers from the model checkpoint at {pretrained_model_name_or_path} were not used when" f" initializing {model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are" f" initializing {model.__class__.__name__} from the checkpoint of a model trained on another task or" " with another architecture (e.g. initializing a BertForSequenceClassification model from a" " BertForPreTraining model).\n- This IS NOT expected if you are initializing" f" {model.__class__.__name__} from the checkpoint of a model that you expect to be exactly identical" " (initializing a BertForSequenceClassification model from a BertForSequenceClassification model)." ) else: logger.warning(f"All model checkpoint layers were used when initializing {model.__class__.__name__}.\n") if len(missing_keys) > 0: logger.warning( f"Some layers of {model.__class__.__name__} were not initialized from the model checkpoint at" f" {pretrained_model_name_or_path} and are newly initialized: {missing_keys}\nYou should probably" " TRAIN this model on a down-stream task to be able to use it for predictions and inference." ) elif len(mismatched_keys) == 0: logger.warning( f"All the layers of {model.__class__.__name__} were initialized from the model checkpoint at" f" {pretrained_model_name_or_path}.\nIf your task is similar to the task the model of the checkpoint" f" was trained on, you can already use {model.__class__.__name__} for predictions without further" " training." ) if len(mismatched_keys) > 0: mismatched_warning = "\n".join( [ f"- {key}: found shape {shape1} in the checkpoint and {shape2} in the model instantiated" for key, shape1, shape2 in mismatched_keys ] ) logger.warning( f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at" f" {pretrained_model_name_or_path} and are newly initialized because the shapes did not" f" match:\n{mismatched_warning}\nYou should probably TRAIN this model on a down-stream task to be able" " to use it for predictions and inference." ) # If it is a model with generation capabilities, attempt to load the generation config if model.can_generate(): try: model.generation_config = GenerationConfig.from_pretrained( pretrained_model_name_or_path, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision, subfolder=subfolder, _from_auto=from_auto_class, _from_pipeline=from_pipeline, **kwargs, ) except OSError: logger.info( "Generation config file not found, using a generation config created from the model config." 
) pass if output_loading_info: loading_info = { "missing_keys": missing_keys, "unexpected_keys": unexpected_keys, "mismatched_keys": mismatched_keys, } return model, loading_info return model def push_to_hub( self, repo_id: str, use_temp_dir: Optional[bool] = None, commit_message: Optional[str] = None, private: Optional[bool] = None, max_shard_size: Optional[Union[int, str]] = "10GB", token: Optional[Union[bool, str]] = None, # (`use_auth_token` is deprecated: we have to keep it here as we don't have **kwargs) use_auth_token: Optional[Union[bool, str]] = None, create_pr: bool = False, **base_model_card_args, ) -> str: """ Upload the model files to the 🤗 Model Hub while synchronizing a local clone of the repo in `repo_path_or_name`. Parameters: repo_id (`str`): The name of the repository you want to push your model to. It should contain your organization name when pushing to a given organization. use_temp_dir (`bool`, *optional*): Whether or not to use a temporary directory to store the files saved before they are pushed to the Hub. Will default to `True` if there is no directory named like `repo_id`, `False` otherwise. commit_message (`str`, *optional*): Message to commit while pushing. Will default to `"Upload model"`. private (`bool`, *optional*): Whether to make the repo private. If `None` (default), the repo will be public unless the organization's default is private. This value is ignored if the repo already exists. token (`bool` or `str`, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated when running `huggingface-cli login` (stored in `~/.huggingface`). Will default to `True` if `repo_url` is not specified. max_shard_size (`int` or `str`, *optional*, defaults to `"10GB"`): Only applicable for models. The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size lower than this size. If expressed as a string, needs to be digits followed by a unit (like `"5MB"`). create_pr (`bool`, *optional*, defaults to `False`): Whether or not to create a PR with the uploaded files or directly commit. Examples: ```python from transformers import TFAutoModel model = TFAutoModel.from_pretrained("google-bert/bert-base-cased") # Push the model to your namespace with the name "my-finetuned-bert". model.push_to_hub("my-finetuned-bert") # Push the model to an organization with the name "my-finetuned-bert". model.push_to_hub("huggingface/my-finetuned-bert") ``` """ if use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", FutureWarning, ) if token is not None: raise ValueError( "`token` and `use_auth_token` are both specified. Please set only the argument `token`." ) token = use_auth_token if "repo_path_or_name" in base_model_card_args: warnings.warn( "The `repo_path_or_name` argument is deprecated and will be removed in v5 of Transformers. Use " "`repo_id` instead." 
) repo_id = base_model_card_args.pop("repo_path_or_name") # Deprecation warning will be sent after for repo_url and organization repo_url = base_model_card_args.pop("repo_url", None) organization = base_model_card_args.pop("organization", None) if os.path.isdir(repo_id): working_dir = repo_id repo_id = repo_id.split(os.path.sep)[-1] else: working_dir = repo_id.split("/")[-1] repo_id = self._create_repo( repo_id, private=private, token=token, repo_url=repo_url, organization=organization ) if use_temp_dir is None: use_temp_dir = not os.path.isdir(working_dir) with working_or_temp_dir(working_dir=working_dir, use_temp_dir=use_temp_dir) as work_dir: files_timestamps = self._get_files_timestamps(work_dir) # Save all files. self.save_pretrained(work_dir, max_shard_size=max_shard_size) if hasattr(self, "history") and hasattr(self, "create_model_card"): # This is a Keras model and we might be able to fish out its History and make a model card out of it base_model_card_args = { "output_dir": work_dir, "model_name": Path(repo_id).name, } base_model_card_args.update(base_model_card_args) self.create_model_card(**base_model_card_args) self._upload_modified_files( work_dir, repo_id, files_timestamps, commit_message=commit_message, token=token, create_pr=create_pr, ) @classmethod def register_for_auto_class(cls, auto_class="TFAutoModel"): """ Register this class with a given auto class. This should only be used for custom models as the ones in the library are already mapped with an auto class. <Tip warning={true}> This API is experimental and may have some slight breaking changes in the next releases. </Tip> Args: auto_class (`str` or `type`, *optional*, defaults to `"TFAutoModel"`): The auto class to register this new model with. """ if not isinstance(auto_class, str): auto_class = auto_class.__name__ import transformers.models.auto as auto_module if not hasattr(auto_module, auto_class): raise ValueError(f"{auto_class} is not a valid auto class.") cls._auto_class = auto_class class TFConv1D(keras.layers.Layer): """ 1D-convolutional layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2). Basically works like a linear layer but the weights are transposed. Args: nf (`int`): The number of output features. nx (`int`): The number of input features. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation to use to initialize the weights. kwargs (`Dict[str, Any]`, *optional*): Additional keyword arguments passed along to the `__init__` of `keras.layers.Layer`. """ def __init__(self, nf, nx, initializer_range=0.02, **kwargs): super().__init__(**kwargs) self.nf = nf self.nx = nx self.initializer_range = initializer_range def build(self, input_shape): if self.built: return self.built = True self.weight = self.add_weight( "weight", shape=[self.nx, self.nf], initializer=get_initializer(self.initializer_range) ) self.bias = self.add_weight("bias", shape=[1, self.nf], initializer=tf.zeros_initializer()) def call(self, x): bz, sl = shape_list(x)[:2] x = tf.reshape(x, [-1, self.nx]) x = tf.matmul(x, self.weight) + self.bias x = tf.reshape(x, [bz, sl, self.nf]) return x class TFSharedEmbeddings(keras.layers.Layer): r""" Construct shared token embeddings. The weights of the embedding layer is usually shared with the weights of the linear decoder when doing language modeling. Args: vocab_size (`int`): The size of the vocabulary, e.g., the number of unique tokens. hidden_size (`int`): The size of the embedding vectors. 
initializer_range (`float`, *optional*): The standard deviation to use when initializing the weights. If no value is provided, it will default to \\(1/\sqrt{hidden\_size}\\). kwargs (`Dict[str, Any]`, *optional*): Additional keyword arguments passed along to the `__init__` of `keras.layers.Layer`. """ # TODO (joao): flagged for delection due to embeddings refactor def __init__(self, vocab_size: int, hidden_size: int, initializer_range: Optional[float] = None, **kwargs): super().__init__(**kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.initializer_range = hidden_size**-0.5 if initializer_range is None else initializer_range warnings.warn( "`TFSharedEmbeddings` is scheduled for deletion in v4.32, use `keras.layers.Embedding` instead.", DeprecationWarning, ) def build(self, input_shape): """ Build shared token embedding layer Shared weights logic adapted from https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24 """ self.weight = self.add_weight( "weight", shape=[self.vocab_size, self.hidden_size], initializer=get_initializer(self.initializer_range) ) super().build(input_shape) def get_config(self): config = { "vocab_size": self.vocab_size, "hidden_size": self.hidden_size, "initializer_range": self.initializer_range, } base_config = super().get_config() return dict(list(base_config.items()) + list(config.items())) def call(self, inputs: tf.Tensor, mode: str = "embedding") -> tf.Tensor: """ Get token embeddings of inputs or decode final hidden state. Args: inputs (`tf.Tensor`): In embedding mode, should be an int64 tensor with shape `[batch_size, length]`. In linear mode, should be a float tensor with shape `[batch_size, length, hidden_size]`. mode (`str`, defaults to `"embedding"`): A valid value is either `"embedding"` or `"linear"`, the first one indicates that the layer should be used as an embedding layer, the second one that the layer should be used as a linear decoder. Returns: `tf.Tensor`: In embedding mode, the output is a float32 embedding tensor, with shape `[batch_size, length, embedding_size]`. In linear mode, the output is a float32 with shape `[batch_size, length, vocab_size]`. Raises: ValueError: if `mode` is not valid. Shared weights logic is adapted from [here](https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24). """ if mode == "embedding": return self._embedding(inputs) elif mode == "linear": return self._linear(inputs) else: raise ValueError(f"mode {mode} is not valid.") def _embedding(self, input_ids): """Applies embedding based on inputs tensor.""" return tf.gather(self.weight, input_ids) def _linear(self, inputs): """ Computes logits by running inputs through a linear layer. Args: inputs: A float32 tensor with shape [..., hidden_size] Returns: float32 tensor with shape [..., vocab_size]. """ first_dims = shape_list(inputs)[:-1] x = tf.reshape(inputs, [-1, self.hidden_size]) logits = tf.matmul(x, self.weight, transpose_b=True) return tf.reshape(logits, first_dims + [self.vocab_size]) class TFSequenceSummary(keras.layers.Layer): """ Compute a single vector summary of a sequence hidden states. Args: config ([`PretrainedConfig`]): The config used by the model. Relevant arguments in the config class of the model are (refer to the actual config class of your model for the default values it uses): - **summary_type** (`str`) -- The method to use to make this summary. 
Accepted values are: - `"last"` -- Take the last token hidden state (like XLNet) - `"first"` -- Take the first token hidden state (like Bert) - `"mean"` -- Take the mean of all tokens hidden states - `"cls_index"` -- Supply a Tensor of classification token position (GPT/GPT-2) - `"attn"` -- Not implemented now, use multi-head attention - **summary_use_proj** (`bool`) -- Add a projection after the vector extraction. - **summary_proj_to_labels** (`bool`) -- If `True`, the projection outputs to `config.num_labels` classes (otherwise to `config.hidden_size`). - **summary_activation** (`Optional[str]`) -- Set to `"tanh"` to add a tanh activation to the output, another string or `None` will add no activation. - **summary_first_dropout** (`float`) -- Optional dropout probability before the projection and activation. - **summary_last_dropout** (`float`)-- Optional dropout probability after the projection and activation. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation to use to initialize the weights. kwargs (`Dict[str, Any]`, *optional*): Additional keyword arguments passed along to the `__init__` of `keras.layers.Layer`. """ def __init__(self, config: PretrainedConfig, initializer_range: float = 0.02, **kwargs): super().__init__(**kwargs) self.summary_type = config.summary_type if hasattr(config, "summary_use_proj") else "last" if self.summary_type == "attn": # We should use a standard multi-head attention module with absolute positional embedding for that. # Cf. https://github.com/zihangdai/xlnet/blob/master/modeling.py#L253-L276 # We can probably just use the multi-head attention module of PyTorch >=1.1.0 raise NotImplementedError self.has_summary = hasattr(config, "summary_use_proj") and config.summary_use_proj if self.has_summary: if hasattr(config, "summary_proj_to_labels") and config.summary_proj_to_labels and config.num_labels > 0: num_classes = config.num_labels else: num_classes = config.hidden_size self.summary = keras.layers.Dense( num_classes, kernel_initializer=get_initializer(initializer_range), name="summary" ) self.has_activation = False activation_string = getattr(config, "summary_activation", None) if activation_string is not None: self.has_activation = True self.activation = get_tf_activation(activation_string) self.has_first_dropout = hasattr(config, "summary_first_dropout") and config.summary_first_dropout > 0 if self.has_first_dropout: self.first_dropout = keras.layers.Dropout(config.summary_first_dropout) self.has_last_dropout = hasattr(config, "summary_last_dropout") and config.summary_last_dropout > 0 if self.has_last_dropout: self.last_dropout = keras.layers.Dropout(config.summary_last_dropout) self.hidden_size = config.hidden_size def call(self, inputs, cls_index=None, training=False): if not isinstance(inputs, (dict, tuple, list)): hidden_states = inputs elif isinstance(inputs, (tuple, list)): hidden_states = inputs[0] cls_index = inputs[1] if len(inputs) > 1 else None assert len(inputs) <= 2, "Too many inputs." else: hidden_states = inputs.get("hidden_states") cls_index = inputs.get("cls_index", None) if self.summary_type == "last": output = hidden_states[:, -1] elif self.summary_type == "first": output = hidden_states[:, 0] elif self.summary_type == "mean": output = tf.reduce_mean(hidden_states, axis=1) elif self.summary_type == "cls_index": hidden_shape = shape_list(hidden_states) # e.g. 
[batch, num choices, seq length, hidden dims] if cls_index is None: cls_index = tf.fill( hidden_shape[:-2], hidden_shape[-2] - 1 ) # A tensor full of shape [batch] or [batch, num choices] full of sequence length cls_shape = shape_list(cls_index) if len(cls_shape) <= len(hidden_shape) - 2: cls_index = tf.expand_dims(cls_index, axis=-1) # else: # cls_index = cls_index[..., tf.newaxis] # cls_index = cls_index.expand((-1,) * (cls_index.dim()-1) + (hidden_states.size(-1),)) # shape of cls_index: (bsz, XX, 1, hidden_size) where XX are optional leading dim of hidden_states output = tf.gather(hidden_states, cls_index, batch_dims=len(hidden_shape) - 2) output = tf.squeeze( output, axis=len(hidden_shape) - 2 ) # shape of output: (batch, num choices, hidden_size) elif self.summary_type == "attn": raise NotImplementedError if self.has_first_dropout: output = self.first_dropout(output, training=training) if self.has_summary: output = self.summary(output) if self.has_activation: output = self.activation(output) if self.has_last_dropout: output = self.last_dropout(output, training=training) return output def build(self, input_shape): if self.built: return self.built = True if getattr(self, "summary", None) is not None: with tf.name_scope("summary"): self.summary.build(self.hidden_size) def get_initializer(initializer_range: float = 0.02) -> keras.initializers.TruncatedNormal: """ Creates a `keras.initializers.TruncatedNormal` with the given range. Args: initializer_range (*float*, defaults to 0.02): Standard deviation of the initializer range. Returns: `keras.initializers.TruncatedNormal`: The truncated normal initializer. """ return keras.initializers.TruncatedNormal(stddev=initializer_range)
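# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for clarity; not part of the original
# module's public API). The hypothetical helper below only exercises classes
# defined above and relies on the module-level TensorFlow imports; all shapes
# and hyperparameters are arbitrary example values.
def _tf_conv1d_usage_sketch():
    """Show that `TFConv1D` behaves like a linear layer with transposed weights,
    mapping `[batch_size, seq_len, nx]` inputs to `[batch_size, seq_len, nf]` outputs."""
    layer = TFConv1D(nf=8, nx=4, initializer_range=0.02)
    x = tf.ones([2, 5, 4])  # [batch_size, seq_len, nx]
    y = layer(x)  # -> [batch_size, seq_len, nf] == [2, 5, 8]
    return y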
transformers/src/transformers/modeling_tf_utils.py/0
{ "file_path": "transformers/src/transformers/modeling_tf_utils.py", "repo_id": "transformers", "token_count": 74552 }
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) _import_structure = { "auto_factory": ["get_values"], "configuration_auto": ["CONFIG_MAPPING", "MODEL_NAMES_MAPPING", "AutoConfig"], "feature_extraction_auto": ["FEATURE_EXTRACTOR_MAPPING", "AutoFeatureExtractor"], "image_processing_auto": ["IMAGE_PROCESSOR_MAPPING", "AutoImageProcessor"], "processing_auto": ["PROCESSOR_MAPPING", "AutoProcessor"], "tokenization_auto": ["TOKENIZER_MAPPING", "AutoTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_auto"] = [ "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING", "MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING", "MODEL_FOR_AUDIO_XVECTOR_MAPPING", "MODEL_FOR_BACKBONE_MAPPING", "MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING", "MODEL_FOR_CAUSAL_LM_MAPPING", "MODEL_FOR_CTC_MAPPING", "MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING", "MODEL_FOR_IMAGE_MAPPING", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING", "MODEL_FOR_IMAGE_TO_IMAGE_MAPPING", "MODEL_FOR_KEYPOINT_DETECTION_MAPPING", "MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING", "MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING", "MODEL_FOR_MASKED_LM_MAPPING", "MODEL_FOR_MASK_GENERATION_MAPPING", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING", "MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING", "MODEL_FOR_OBJECT_DETECTION_MAPPING", "MODEL_FOR_PRETRAINING_MAPPING", "MODEL_FOR_QUESTION_ANSWERING_MAPPING", "MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING", "MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING", "MODEL_FOR_TEXT_ENCODING_MAPPING", "MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING", "MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING", "MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING", "MODEL_FOR_VISION_2_SEQ_MAPPING", "MODEL_FOR_RETRIEVAL_MAPPING", "MODEL_FOR_IMAGE_TEXT_TO_TEXT_MAPPING", "MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING", "MODEL_MAPPING", "MODEL_WITH_LM_HEAD_MAPPING", "MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING", "MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING", "MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING", "MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING", "AutoModel", "AutoBackbone", "AutoModelForAudioClassification", "AutoModelForAudioFrameClassification", "AutoModelForAudioXVector", "AutoModelForCausalLM", "AutoModelForCTC", "AutoModelForDepthEstimation", "AutoModelForImageClassification", "AutoModelForImageSegmentation", "AutoModelForImageToImage", "AutoModelForInstanceSegmentation", "AutoModelForKeypointDetection", "AutoModelForMaskGeneration", "AutoModelForTextEncoding", "AutoModelForMaskedImageModeling", "AutoModelForMaskedLM", 
"AutoModelForMultipleChoice", "AutoModelForNextSentencePrediction", "AutoModelForObjectDetection", "AutoModelForPreTraining", "AutoModelForQuestionAnswering", "AutoModelForSemanticSegmentation", "AutoModelForSeq2SeqLM", "AutoModelForSequenceClassification", "AutoModelForSpeechSeq2Seq", "AutoModelForTableQuestionAnswering", "AutoModelForTextToSpectrogram", "AutoModelForTextToWaveform", "AutoModelForTokenClassification", "AutoModelForUniversalSegmentation", "AutoModelForVideoClassification", "AutoModelForVision2Seq", "AutoModelForVisualQuestionAnswering", "AutoModelForDocumentQuestionAnswering", "AutoModelWithLMHead", "AutoModelForZeroShotImageClassification", "AutoModelForZeroShotObjectDetection", "AutoModelForImageTextToText", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_tf_auto"] = [ "TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING", "TF_MODEL_FOR_CAUSAL_LM_MAPPING", "TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING", "TF_MODEL_FOR_MASK_GENERATION_MAPPING", "TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING", "TF_MODEL_FOR_MASKED_LM_MAPPING", "TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING", "TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING", "TF_MODEL_FOR_PRETRAINING_MAPPING", "TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING", "TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING", "TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING", "TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING", "TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING", "TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING", "TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING", "TF_MODEL_FOR_TEXT_ENCODING_MAPPING", "TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING", "TF_MODEL_FOR_VISION_2_SEQ_MAPPING", "TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING", "TF_MODEL_MAPPING", "TF_MODEL_WITH_LM_HEAD_MAPPING", "TFAutoModel", "TFAutoModelForAudioClassification", "TFAutoModelForCausalLM", "TFAutoModelForImageClassification", "TFAutoModelForMaskedImageModeling", "TFAutoModelForMaskedLM", "TFAutoModelForMaskGeneration", "TFAutoModelForMultipleChoice", "TFAutoModelForNextSentencePrediction", "TFAutoModelForPreTraining", "TFAutoModelForDocumentQuestionAnswering", "TFAutoModelForQuestionAnswering", "TFAutoModelForSemanticSegmentation", "TFAutoModelForSeq2SeqLM", "TFAutoModelForSequenceClassification", "TFAutoModelForSpeechSeq2Seq", "TFAutoModelForTableQuestionAnswering", "TFAutoModelForTextEncoding", "TFAutoModelForTokenClassification", "TFAutoModelForVision2Seq", "TFAutoModelForZeroShotImageClassification", "TFAutoModelWithLMHead", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_flax_auto"] = [ "FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING", "FLAX_MODEL_FOR_CAUSAL_LM_MAPPING", "FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING", "FLAX_MODEL_FOR_MASKED_LM_MAPPING", "FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING", "FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING", "FLAX_MODEL_FOR_PRETRAINING_MAPPING", "FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING", "FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING", "FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING", "FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING", "FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING", "FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING", "FLAX_MODEL_MAPPING", "FlaxAutoModel", "FlaxAutoModelForCausalLM", "FlaxAutoModelForImageClassification", "FlaxAutoModelForMaskedLM", "FlaxAutoModelForMultipleChoice", "FlaxAutoModelForNextSentencePrediction", "FlaxAutoModelForPreTraining", 
"FlaxAutoModelForQuestionAnswering", "FlaxAutoModelForSeq2SeqLM", "FlaxAutoModelForSequenceClassification", "FlaxAutoModelForSpeechSeq2Seq", "FlaxAutoModelForTokenClassification", "FlaxAutoModelForVision2Seq", ] if TYPE_CHECKING: from .auto_factory import get_values from .configuration_auto import CONFIG_MAPPING, MODEL_NAMES_MAPPING, AutoConfig from .feature_extraction_auto import FEATURE_EXTRACTOR_MAPPING, AutoFeatureExtractor from .image_processing_auto import IMAGE_PROCESSOR_MAPPING, AutoImageProcessor from .processing_auto import PROCESSOR_MAPPING, AutoProcessor from .tokenization_auto import TOKENIZER_MAPPING, AutoTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_auto import ( MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING, MODEL_FOR_AUDIO_XVECTOR_MAPPING, MODEL_FOR_BACKBONE_MAPPING, MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING, MODEL_FOR_CAUSAL_LM_MAPPING, MODEL_FOR_CTC_MAPPING, MODEL_FOR_DEPTH_ESTIMATION_MAPPING, MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_IMAGE_MAPPING, MODEL_FOR_IMAGE_SEGMENTATION_MAPPING, MODEL_FOR_IMAGE_TEXT_TO_TEXT_MAPPING, MODEL_FOR_IMAGE_TO_IMAGE_MAPPING, MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING, MODEL_FOR_KEYPOINT_DETECTION_MAPPING, MODEL_FOR_MASK_GENERATION_MAPPING, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING, MODEL_FOR_MASKED_LM_MAPPING, MODEL_FOR_MULTIPLE_CHOICE_MAPPING, MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING, MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_PRETRAINING_MAPPING, MODEL_FOR_QUESTION_ANSWERING_MAPPING, MODEL_FOR_RETRIEVAL_MAPPING, MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING, MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING, MODEL_FOR_TEXT_ENCODING_MAPPING, MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING, MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING, MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING, MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING, MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, MODEL_FOR_VISION_2_SEQ_MAPPING, MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, MODEL_MAPPING, MODEL_WITH_LM_HEAD_MAPPING, AutoBackbone, AutoModel, AutoModelForAudioClassification, AutoModelForAudioFrameClassification, AutoModelForAudioXVector, AutoModelForCausalLM, AutoModelForCTC, AutoModelForDepthEstimation, AutoModelForDocumentQuestionAnswering, AutoModelForImageClassification, AutoModelForImageSegmentation, AutoModelForImageTextToText, AutoModelForImageToImage, AutoModelForInstanceSegmentation, AutoModelForKeypointDetection, AutoModelForMaskedImageModeling, AutoModelForMaskedLM, AutoModelForMaskGeneration, AutoModelForMultipleChoice, AutoModelForNextSentencePrediction, AutoModelForObjectDetection, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSemanticSegmentation, AutoModelForSeq2SeqLM, AutoModelForSequenceClassification, AutoModelForSpeechSeq2Seq, AutoModelForTableQuestionAnswering, AutoModelForTextEncoding, AutoModelForTextToSpectrogram, AutoModelForTextToWaveform, AutoModelForTokenClassification, AutoModelForUniversalSegmentation, AutoModelForVideoClassification, AutoModelForVision2Seq, AutoModelForVisualQuestionAnswering, AutoModelForZeroShotImageClassification, AutoModelForZeroShotObjectDetection, 
AutoModelWithLMHead, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_auto import ( TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_MASK_GENERATION_MAPPING, TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING, TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_TEXT_ENCODING_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_FOR_VISION_2_SEQ_MAPPING, TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING, TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING, TFAutoModel, TFAutoModelForAudioClassification, TFAutoModelForCausalLM, TFAutoModelForDocumentQuestionAnswering, TFAutoModelForImageClassification, TFAutoModelForMaskedImageModeling, TFAutoModelForMaskedLM, TFAutoModelForMaskGeneration, TFAutoModelForMultipleChoice, TFAutoModelForNextSentencePrediction, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSemanticSegmentation, TFAutoModelForSeq2SeqLM, TFAutoModelForSequenceClassification, TFAutoModelForSpeechSeq2Seq, TFAutoModelForTableQuestionAnswering, TFAutoModelForTextEncoding, TFAutoModelForTokenClassification, TFAutoModelForVision2Seq, TFAutoModelForZeroShotImageClassification, TFAutoModelWithLMHead, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_auto import ( FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, FLAX_MODEL_FOR_MASKED_LM_MAPPING, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING, FLAX_MODEL_FOR_PRETRAINING_MAPPING, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING, FLAX_MODEL_MAPPING, FlaxAutoModel, FlaxAutoModelForCausalLM, FlaxAutoModelForImageClassification, FlaxAutoModelForMaskedLM, FlaxAutoModelForMultipleChoice, FlaxAutoModelForNextSentencePrediction, FlaxAutoModelForPreTraining, FlaxAutoModelForQuestionAnswering, FlaxAutoModelForSeq2SeqLM, FlaxAutoModelForSequenceClassification, FlaxAutoModelForSpeechSeq2Seq, FlaxAutoModelForTokenClassification, FlaxAutoModelForVision2Seq, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
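# ---------------------------------------------------------------------------
# Illustrative note (comments only, added for clarity): thanks to the
# `_LazyModule` assignment above, the heavy framework-specific submodules are
# only imported when one of the exported names is actually accessed, while the
# `if TYPE_CHECKING:` branch keeps static analyzers and IDEs aware of every
# symbol. A typical usage sketch (the "gpt2" checkpoint is just an example):
#
#     from transformers import AutoConfig, AutoTokenizer, AutoModelForCausalLM
#
#     config = AutoConfig.from_pretrained("gpt2")           # resolves the concrete config class
#     tokenizer = AutoTokenizer.from_pretrained("gpt2")     # lazily imports the tokenizer code
#     model = AutoModelForCausalLM.from_pretrained("gpt2")  # lazily imports the PyTorch modeling code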
transformers/src/transformers/models/auto/__init__.py/0
{ "file_path": "transformers/src/transformers/models/auto/__init__.py", "repo_id": "transformers", "token_count": 8478 }
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # This file was automatically generated from src/transformers/models/bamba/modular_bamba.py. # Do NOT edit this file manually as any edits will be overwritten by the generation of # the file from the modular. If any change should be done, please apply the change to the # modular_bamba.py file directly. One of our CI enforces this. # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # coding=utf-8 # Copyright 2024 IBM and the HuggingFace Inc. team. All rights reserved. # # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX # and OPT implementations in this library. It has been modified from its # original forms to accommodate minor architectural differences compared # to GPT-NeoX and OPT used by the Meta AI team that trained the model. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Callable, Optional, Tuple, Union import torch from torch import nn import transformers.models.jamba.modeling_jamba as modeling_jamba from transformers.activations import ACT2FN from ...cache_utils import Cache # we need __iter__ and __len__ of pkv from ...generation import GenerationMixin from ...modeling_attn_mask_utils import AttentionMaskConverter from ...modeling_flash_attention_utils import FlashAttentionKwargs from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...processing_utils import Unpack from ...utils import ( add_start_docstrings, add_start_docstrings_to_model_forward, is_torchdynamo_compiling, logging, replace_return_docstrings, ) from ...utils.deprecation import deprecate_kwarg from ...utils.import_utils import ( is_causal_conv1d_available, is_mamba_2_ssm_available, ) from .configuration_bamba import BambaConfig if is_mamba_2_ssm_available(): from mamba_ssm.ops.triton.selective_state_update import selective_state_update from mamba_ssm.ops.triton.ssd_combined import mamba_chunk_scan_combined, mamba_split_conv1d_scan_combined else: selective_state_update = None if is_causal_conv1d_available(): from causal_conv1d import causal_conv1d_fn, causal_conv1d_update else: causal_conv1d_update, causal_conv1d_fn = None, None logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "BambaConfig" # Adapted from transformers.models.jamba.modeling_jamba.HybridMambaAttentionDynamicCache for the v2 mixer class HybridMambaAttentionDynamicCache(modeling_jamba.HybridMambaAttentionDynamicCache): """ A dynamic cache that can handle both the attention cache (which has a seq_len dimension) and the mamba cache (which has a constant shape regardless of seq_len). This cache has two sets of lists of tensors: `key_cache` and `value_cache` for attention cache and `conv_states` and `ssm_states` for mamba cache. Each of these lists has `num_layers` tensors. 
The expected shape for each tensor For attention layers, `key_cache` and `value_cache` have a shape of `(batch_size, num_heads, seq_len, head_dim)`, while `conv_states` and `ssm_states` have a shape of `(batch_size, 0)` (empty tensors). For mamba layers, `key_cache` and `value_cache` have a shape of `(batch_size, 0)` (empty tensors), while `conv_states` represents the convolution state and has a shape of `(batch_size, d_inner, d_conv)`, and `ssm_states` represents the ssm state and has a shape of `(batch_size, d_inner, d_state)`. """ def __init__(self, config: BambaConfig, batch_size, dtype=torch.float16, device=None): super().__init__(config, batch_size, dtype, device) self.layers_block_type = config.layers_block_type self.has_previous_state = False # only used by mamba conv_kernel_size = config.mamba_d_conv ssm_state_size = config.mamba_d_state self.conv_states = [] self.ssm_states = [] self.transformer_layers = [] for i in range(config.num_hidden_layers): if self.layers_block_type[i] == "mamba": self.conv_states += [ torch.zeros( batch_size, (config.mamba_expand * config.hidden_size + 2 * config.mamba_n_groups * ssm_state_size), conv_kernel_size, device=device, dtype=dtype, ) ] self.ssm_states += [ torch.zeros( batch_size, config.mamba_n_heads, config.mamba_d_head, ssm_state_size, device=device, dtype=dtype, ) ] else: self.conv_states += [torch.tensor([[]] * batch_size, device=device)] self.ssm_states += [torch.tensor([[]] * batch_size, device=device)] self.transformer_layers.append(i) self.key_cache = [torch.tensor([[]] * batch_size, device=device) for _ in range(config.num_hidden_layers)] self.value_cache = [torch.tensor([[]] * batch_size, device=device) for _ in range(config.num_hidden_layers)] class BambaRotaryEmbedding(nn.Module): def __init__(self, config: BambaConfig, device=None): super().__init__() # BC: "rope_type" was originally "type" if hasattr(config, "rope_scaling") and config.rope_scaling is not None: self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type")) else: self.rope_type = "default" self.max_seq_len_cached = config.max_position_embeddings self.original_max_seq_len = config.max_position_embeddings self.config = config self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type] inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device) self.register_buffer("inv_freq", inv_freq, persistent=False) self.original_inv_freq = self.inv_freq def _dynamic_frequency_update(self, position_ids, device): """ dynamic RoPE layers should recompute `inv_freq` in the following situations: 1 - growing beyond the cached sequence length (allow scaling) 2 - the current sequence length is in the original scale (avoid losing precision with small sequences) """ seq_len = torch.max(position_ids) + 1 if seq_len > self.max_seq_len_cached: # growth inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device, seq_len=seq_len) self.register_buffer("inv_freq", inv_freq, persistent=False) # TODO joao: may break with compilation self.max_seq_len_cached = seq_len if seq_len < self.original_max_seq_len and self.max_seq_len_cached > self.original_max_seq_len: # reset # This .to() is needed if the model has been moved to a device after being initialized (because # the buffer is automatically moved, but not the original copy) self.original_inv_freq = self.original_inv_freq.to(device) self.register_buffer("inv_freq", self.original_inv_freq, persistent=False) self.max_seq_len_cached = self.original_max_seq_len @torch.no_grad() def forward(self, x, 
position_ids): if "dynamic" in self.rope_type: self._dynamic_frequency_update(position_ids, device=x.device) # Core RoPE block inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1) position_ids_expanded = position_ids[:, None, :].float() # Force float32 (see https://github.com/huggingface/transformers/pull/29285) device_type = x.device.type device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu" with torch.autocast(device_type=device_type, enabled=False): freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) emb = torch.cat((freqs, freqs), dim=-1) cos = emb.cos() sin = emb.sin() # Advanced RoPE types (e.g. yarn) apply a post-processing scaling factor, equivalent to scaling attention cos = cos * self.attention_scaling sin = sin * self.attention_scaling return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype) def rotate_half(x): """Rotates half the hidden dims of the input.""" x1 = x[..., : x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2 :] return torch.cat((-x2, x1), dim=-1) def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: """ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch, num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) """ batch, num_key_value_heads, slen, head_dim = hidden_states.shape if n_rep == 1: return hidden_states hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) def eager_attention_forward( module: nn.Module, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_mask: Optional[torch.Tensor], scaling: float, dropout: float = 0.0, **kwargs, ): key_states = repeat_kv(key, module.num_key_value_groups) value_states = repeat_kv(value, module.num_key_value_groups) attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling if attention_mask is not None: causal_mask = attention_mask[:, :, :, : key_states.shape[-2]] attn_weights = attn_weights + causal_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype) attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training) attn_output = torch.matmul(attn_weights, value_states) attn_output = attn_output.transpose(1, 2).contiguous() return attn_output, attn_weights # Adapted from transformers.models.glm.modular_glm.apply_rotary_pos_emb def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1): """Applies Rotary Position Embedding to the query and key tensors. Removes the interleaving of cos and sin from GLM Args: q (`torch.Tensor`): The query tensor. k (`torch.Tensor`): The key tensor. cos (`torch.Tensor`): The cosine part of the rotary embedding. sin (`torch.Tensor`): The sine part of the rotary embedding. position_ids (`torch.Tensor`, *optional*): Deprecated and unused. unsqueeze_dim (`int`, *optional*, defaults to 1): The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. 
Then, if q and k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. Returns: `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. """ cos = cos.unsqueeze(unsqueeze_dim) sin = sin.unsqueeze(unsqueeze_dim) # Keep half or full tensor for later concatenation rotary_dim = cos.shape[-1] q_rot, q_pass = q[..., :rotary_dim], q[..., rotary_dim:] k_rot, k_pass = k[..., :rotary_dim], k[..., rotary_dim:] # Apply rotary embeddings on the first half or full tensor q_embed = (q_rot * cos) + (rotate_half(q_rot) * sin) k_embed = (k_rot * cos) + (rotate_half(k_rot) * sin) # Concatenate back to full shape q_embed = torch.cat([q_embed, q_pass], dim=-1) k_embed = torch.cat([k_embed, k_pass], dim=-1) return q_embed, k_embed class BambaAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config: BambaConfig, layer_idx: int): super().__init__() self.config = config self.layer_idx = layer_idx self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads) self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads self.scaling = self.head_dim**-0.5 self.attention_dropout = config.attention_dropout self.is_causal = True self.q_proj = nn.Linear( config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias ) self.k_proj = nn.Linear( config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias ) self.v_proj = nn.Linear( config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias ) self.o_proj = nn.Linear( config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias ) def forward( self, hidden_states: torch.Tensor, position_embeddings: Tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_value: Optional[Cache] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[FlashAttentionKwargs], ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: input_shape = hidden_states.shape[:-1] hidden_shape = (*input_shape, -1, self.head_dim) query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2) key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2) value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2) cos, sin = position_embeddings query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_value is not None: # sin and cos are specific to RoPE models; cache_position needed for the static cache cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False): logger.warning_once( "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to " 'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' 
) else: attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, **kwargs, ) attn_output = attn_output.reshape(*input_shape, -1).contiguous() attn_output = self.o_proj(attn_output) return attn_output, attn_weights class BambaRMSNormGated(torch.nn.Module): def __init__(self, hidden_size, eps=1e-6): super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states, gate=None): input_dtype = hidden_states.dtype hidden_states = hidden_states.to(torch.float32) if gate is not None: hidden_states = hidden_states * nn.functional.silu(gate.to(torch.float32)) variance = hidden_states.pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) return self.weight * hidden_states.to(input_dtype) # Helper methods for segment sum computation def pad_tensor_by_size(input_tensor: torch.Tensor, pad_size: int): """ Padding x tensor with `pad_size` on the seq_len dim (dim=1) Assumes that we only have tensors of either size 4 or 3 """ pad_shape = (0, 0, 0, 0, 0, pad_size, 0, 0) if len(input_tensor.shape) == 4 else (0, 0, 0, pad_size, 0, 0) return torch.nn.functional.pad(input_tensor, pad_shape, mode="constant", value=0) def reshape_into_chunks(input_tensor, pad_size, chunk_size): """ Padding input_tensor with `pad_size` on the seq_len dim (dim=1) and simultaneously splitting it into chunk sequences. Assumes that we only have tensors of either size 4 or 3 """ # [bsz, seq_len, ...] -> [bsz, seq_len multiple of chunk_size, ...] input_tensor = pad_tensor_by_size(input_tensor, pad_size) if len(input_tensor.shape) == 3: # [bsz, seq_len multiple of chunk_size, num_heads] -> [bsz, -1, chunk_size, num_heads] return input_tensor.reshape(input_tensor.shape[0], -1, chunk_size, input_tensor.shape[2]) else: # [bsz, seq_len multiple of chunk_size, num_heads, head_dim or state_size] -> [bsz, -1, chunk_size, num_heads, head_dim or state_size] return input_tensor.reshape( input_tensor.shape[0], -1, chunk_size, input_tensor.shape[2], input_tensor.shape[3] ) def segment_sum(input_tensor): """ More stable segment sum calculation. Uses cumulative sums and masking instead of direct subtractions. """ chunk_size = input_tensor.size(-1) # 1. expand input tensor to have an additional dimension and repeat along that dimension # [..., chunk_size] -> [..., chunk_size, chunk_size] input_tensor = input_tensor[..., None].expand(*input_tensor.size(), chunk_size) # 2. create a lower triangular mask with the diagonal set to 0 to 0 out elements above diag mask = torch.tril(torch.ones(chunk_size, chunk_size, device=input_tensor.device, dtype=torch.bool), diagonal=-1) input_tensor = input_tensor.masked_fill(~mask, 0) # 3. compute actual cumsum tensor_segsum = torch.cumsum(input_tensor, dim=-2) # 4. 
apply mask to keep only the lower triangular part of the cumulative sum result (incl diagonal this time)
    mask = torch.tril(torch.ones(chunk_size, chunk_size, device=input_tensor.device, dtype=torch.bool), diagonal=0)
    tensor_segsum = tensor_segsum.masked_fill(~mask, -torch.inf)

    return tensor_segsum


is_fast_path_available = all((selective_state_update, causal_conv1d_fn, causal_conv1d_update))


def apply_mask_to_padding_states(hidden_states, attention_mask):
    """
    Tunes out the hidden states for padding tokens, see https://github.com/state-spaces/mamba/issues/66
    """
    if attention_mask is not None and attention_mask.shape[1] > 1 and attention_mask.shape[0] > 1:
        dtype = hidden_states.dtype
        hidden_states = (hidden_states * attention_mask[:, :, None]).to(dtype)

    return hidden_states


# Adapted from transformers.models.mamba2.modeling_mamba2.Mamba2Mixer
class BambaMixer(nn.Module):
    """
    Compute ∆, A, B, C, and D the state space parameters and compute the `contextualized_states`.
    A, D are input independent (see Mamba paper [1] Section 3.5.2 "Interpretation of A" for why A isn't selective)
    ∆, B, C are input-dependent (this is a key difference between Mamba and the linear time invariant S4,
    and is why Mamba is called **selective** state spaces)

    There are a few differences between this and Mamba2Mixer:
    - The variable use_precomputed_states is slightly different due to the HybridCache structure
    - There are a few non-obvious bugs fixed with batching in the slow path that exist in main
    - Some extra variables that our layer doesn't need have been removed
    - We ported most of the refactors in https://github.com/huggingface/transformers/pull/35154, which is (as of Dec 18, 2024) unmerged
    """

    def __init__(self, config: BambaConfig, layer_idx: int):
        super().__init__()
        self.num_heads = config.mamba_n_heads
        self.hidden_size = config.hidden_size
        self.ssm_state_size = config.mamba_d_state
        self.conv_kernel_size = config.mamba_d_conv
        self.intermediate_size = int(config.mamba_expand * self.hidden_size)
        self.layer_idx = layer_idx
        self.use_conv_bias = config.mamba_conv_bias
        self.activation = config.hidden_act
        self.act = ACT2FN[config.hidden_act]
        self.use_bias = config.mamba_proj_bias

        self.layer_norm_epsilon = config.rms_norm_eps

        self.n_groups = config.mamba_n_groups
        self.head_dim = config.mamba_d_head
        self.chunk_size = config.mamba_chunk_size

        # FIXME:
        self.time_step_limit = (0.0, float("inf"))
        self.time_step_min = 0.001
        self.time_step_max = 0.1

        self.conv_dim = self.intermediate_size + 2 * self.n_groups * self.ssm_state_size
        self.conv1d = nn.Conv1d(
            in_channels=self.conv_dim,
            out_channels=self.conv_dim,
            bias=config.mamba_conv_bias,
            kernel_size=self.conv_kernel_size,
            groups=self.conv_dim,
            padding=self.conv_kernel_size - 1,
        )

        # projection of the input hidden states
        projection_size = self.intermediate_size + self.conv_dim + self.num_heads
        self.in_proj = nn.Linear(
            self.hidden_size,
            projection_size,
            bias=self.use_bias,
        )
        # selective projection used to make dt, B and C input dependent

        # time step projection (discretization)
        # instantiate once and copy inv_dt in init_weights of PretrainedModel
        self.dt_bias = nn.Parameter(torch.ones(self.num_heads))

        # S4D real initialization. These are not discretized!
        # The core is to load them, compute the discrete states, then write the updated state.
Keeps the memory bounded A = torch.arange(1, self.num_heads + 1) self.A_log = nn.Parameter(torch.log(A)) self.A_log._no_weight_decay = True self.norm = BambaRMSNormGated(self.intermediate_size, eps=self.layer_norm_epsilon) self.D = nn.Parameter(torch.ones(self.num_heads)) self.D._no_weight_decay = True self.out_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=self.use_bias) if not is_fast_path_available: logger.warning_once( "The fast path is not available because on of `(selective_state_update, causal_conv1d_fn, causal_conv1d_update)`" " is None. Falling back to the naive implementation. To install follow https://github.com/state-spaces/mamba/#installation and" " https://github.com/Dao-AILab/causal-conv1d" ) else: logger.warning_once("The fast path for Bamba will be used when running the model on a GPU") def cuda_kernels_forward( self, hidden_states: torch.Tensor, cache_params: Optional[HybridMambaAttentionDynamicCache] = None, cache_position: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, ): # 1. Gated MLP's linear projection hidden_states = apply_mask_to_padding_states(hidden_states, attention_mask) projected_states = self.in_proj(hidden_states) # Set up dimensions for reshapes later batch_size, seq_len, _ = hidden_states.shape groups_time_state_size = self.n_groups * self.ssm_state_size use_precomputed_states = ( cache_params is not None and cache_params.has_previous_state and seq_len == 1 and cache_params.conv_states[self.layer_idx].shape[0] == cache_params.ssm_states[self.layer_idx].shape[0] == batch_size and cache_position is not None and cache_position[0] > 0 ) # getting projected states from cache if it exists if use_precomputed_states: gate, hidden_states_B_C, dt = projected_states.squeeze(1).split( [self.intermediate_size, self.conv_dim, self.num_heads], dim=-1 ) # 2. Convolution sequence transformation hidden_states_B_C = causal_conv1d_update( hidden_states_B_C, cache_params.conv_states[self.layer_idx], self.conv1d.weight.squeeze(1), self.conv1d.bias, self.activation, ) hidden_states, B, C = torch.split( hidden_states_B_C, [self.intermediate_size, groups_time_state_size, groups_time_state_size], dim=-1, ) # 3. SSM transformation A = -torch.exp(self.A_log.float()) # (nheads,) A = A[:, None, ...][:, :, None].expand(-1, self.head_dim, self.ssm_state_size).to(dtype=torch.float32) dt = dt[:, :, None].expand(-1, -1, self.head_dim) dt_bias = self.dt_bias[:, None, ...].expand(-1, self.head_dim) D = self.D[:, None, ...].expand(-1, self.head_dim) B = B.view(batch_size, self.n_groups, B.shape[1] // self.n_groups) C = C.view(batch_size, self.n_groups, C.shape[1] // self.n_groups) hidden_states_reshaped = hidden_states.view(batch_size, self.num_heads, self.head_dim) hidden_states = selective_state_update( cache_params.ssm_states[self.layer_idx], hidden_states_reshaped, dt, A, B, C, D, z=None, dt_bias=dt_bias, dt_softplus=True, ) hidden_states = hidden_states.view(batch_size, self.num_heads * self.head_dim) hidden_states = self.norm(hidden_states, gate) # 4. Final linear projection out = self.out_proj(hidden_states)[:, None, ...] # Fused calculations or step by step if no initialized cache is found else: A = -torch.exp(self.A_log.float()) # (num_heads) or (intermediate_size, state_size) dt_limit_kwargs = {} if self.time_step_limit == (0.0, float("inf")) else {"dt_limit": self.time_step_limit} # 2-4. 
Fused kernel for conv1d, SSM, and the final projection if self.training and cache_params is None: out = mamba_split_conv1d_scan_combined( projected_states, self.conv1d.weight.squeeze(1), self.conv1d.bias, self.dt_bias, A, D=self.D, chunk_size=self.chunk_size, seq_idx=None, # was seq_idx activation=self.activation, rmsnorm_weight=self.norm.weight, rmsnorm_eps=self.norm.variance_epsilon, outproj_weight=self.out_proj.weight, outproj_bias=self.out_proj.bias, headdim=self.head_dim, ngroups=self.n_groups, norm_before_gate=False, return_final_states=False, **dt_limit_kwargs, ) else: gate, hidden_states_B_C, dt = projected_states.split( [self.intermediate_size, self.conv_dim, self.num_heads], dim=-1 ) # 2. Convolution sequence transformation # Init cache if cache_params is not None: # storing the states # If we just take xBC[:, :, -self.d_conv :], it will error if seqlen < self.d_conv # Instead F.pad will pad with zeros if seqlen < self.d_conv, and truncate otherwise. hidden_states_B_C_transposed = hidden_states_B_C.transpose(1, 2) conv_states = nn.functional.pad( hidden_states_B_C_transposed, (self.conv_kernel_size - hidden_states_B_C_transposed.shape[-1], 0), ) cache_params.conv_states[self.layer_idx].copy_(conv_states) if self.activation not in ["silu", "swish"]: hidden_states_B_C = self.act( self.conv1d(hidden_states_B_C.transpose(1, 2))[..., :seq_len].transpose(1, 2) ) else: hidden_states_B_C = causal_conv1d_fn( x=hidden_states_B_C.transpose(1, 2), weight=self.conv1d.weight.squeeze(1), bias=self.conv1d.bias, activation=self.activation, ).transpose(1, 2) hidden_states_B_C = apply_mask_to_padding_states(hidden_states_B_C, attention_mask) hidden_states, B, C = torch.split( hidden_states_B_C, [self.intermediate_size, groups_time_state_size, groups_time_state_size], dim=-1, ) # 3. SSM transformation scan_output, ssm_state = mamba_chunk_scan_combined( hidden_states.view(batch_size, seq_len, -1, self.head_dim), dt, A, B.view(batch_size, seq_len, self.n_groups, -1), C.view(batch_size, seq_len, self.n_groups, -1), chunk_size=self.chunk_size, D=self.D, z=None, seq_idx=None, return_final_states=True, dt_bias=self.dt_bias, dt_softplus=True, **dt_limit_kwargs, ) # Init cache if ssm_state is not None and cache_params is not None: cache_params.ssm_states[self.layer_idx].copy_(ssm_state) scan_output = scan_output.view(batch_size, seq_len, -1) # Multiply "gate" branch and apply extra normalization layer scan_output = self.norm(scan_output, gate) # 4. Final linear projection out = self.out_proj(scan_output) return out # fmt: off def torch_forward( self, input_states, cache_params: Optional[HybridMambaAttentionDynamicCache] = None, cache_position: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, ): batch_size, seq_len, _ = input_states.shape dtype = input_states.dtype # 1. Gated MLP's linear projection input_states = apply_mask_to_padding_states(input_states, attention_mask) projected_states = self.in_proj(input_states) gate, hidden_states_B_C, dt = projected_states.split( [self.intermediate_size, self.conv_dim, self.num_heads], dim=-1 ) use_precomputed_states = ( cache_params is not None and cache_params.has_previous_state and seq_len == 1 and cache_params.conv_states[self.layer_idx].shape[0] == cache_params.ssm_states[self.layer_idx].shape[0] == batch_size and cache_position is not None and cache_position[0] > 0 ) # 2. 
Convolution sequence transformation if use_precomputed_states: cache_params.conv_states[self.layer_idx] = cache_params.conv_states[self.layer_idx].roll(shifts=-1, dims=-1) cache_params.conv_states[self.layer_idx][:, :, -1] = hidden_states_B_C[:, 0, :].to(cache_params.conv_states[self.layer_idx].device) # We need to guarantee that anything regarding the cache is on the same device conv_states = cache_params.conv_states[self.layer_idx].to(device=self.conv1d.weight.device) hidden_states_B_C = torch.sum( conv_states * self.conv1d.weight.squeeze(1), dim=-1 ) if self.use_conv_bias: hidden_states_B_C = hidden_states_B_C + self.conv1d.bias hidden_states_B_C = self.act(hidden_states_B_C) else: # Init cache if cache_params is not None: hidden_states_B_C_transposed = hidden_states_B_C.transpose(1, 2) conv_states = nn.functional.pad( hidden_states_B_C_transposed, (self.conv_kernel_size - hidden_states_B_C_transposed.shape[-1], 0) ) cache_params.conv_states[self.layer_idx].copy_(conv_states) hidden_states_B_C = self.act(self.conv1d(hidden_states_B_C.transpose(1, 2))[..., :seq_len].transpose(1, 2)) hidden_states_B_C = apply_mask_to_padding_states(hidden_states_B_C, attention_mask) hidden_states, B, C = torch.split( hidden_states_B_C, [self.intermediate_size, self.n_groups * self.ssm_state_size, self.n_groups * self.ssm_state_size], dim=-1 ) # 3. SSM transformation A = -torch.exp(self.A_log.float()) # [num_heads] if use_precomputed_states: # We need to guarantee that anything regarding the cache is on the same device cache_device = cache_params.ssm_states[self.layer_idx].device # Note: there is no need to pad parameter matrices here, as there is just one new token # for batched generation dt = dt[:, 0, :][:, None, ...] dt = dt.transpose(1, 2).expand(batch_size, dt.shape[-1], self.head_dim) # [num_heads] -> [num_heads, head_dim] dt_bias = self.dt_bias[..., None].expand(self.dt_bias.shape[0], self.head_dim) dt = torch.nn.functional.softplus(dt + dt_bias.to(dt.dtype)) dt = torch.clamp(dt, self.time_step_limit[0], self.time_step_limit[1]) A = A[..., None, None].expand(self.num_heads, self.head_dim, self.ssm_state_size).to(dtype=torch.float32) # [bsz, num_heads, head_dim, state_size] dA = (torch.exp(dt[..., None] * A)).to(device=cache_device) # Discretize B # [bsz, n_groups * state_size] -> [bsz, n_groups, 1, state_size] -> # -> [bsz, n_groups, group to head repetition factor, state_size] -> [bsz, num_heads, state_size] B = B.reshape(batch_size, self.n_groups, -1)[..., None, :] B = B.expand(batch_size, self.n_groups, self.num_heads // self.n_groups, B.shape[-1]).contiguous() B = B.reshape(batch_size, -1, B.shape[-1]) # [bsz, num_heads, head_dim, state_size] dB = dt[..., None] * B[..., None, :] # Discretize x into dB # [bsz, intermediate_size] -> [bsz, num_heads, head_dim] hidden_states = hidden_states.reshape(batch_size, -1, self.head_dim) dBx = (dB * hidden_states[..., None]).to(device=cache_device) # State calculation cache_params.ssm_states[self.layer_idx].copy_( cache_params.ssm_states[self.layer_idx] * dA + dBx ) # Subsequent output # [bsz, n_groups * state_size] -> [bsz, num_heads, state_size] C = C.reshape(batch_size, self.n_groups, -1)[..., None, :] C = C.expand(batch_size, self.n_groups, self.num_heads // self.n_groups, C.shape[-1]).contiguous() C = C.reshape(batch_size, -1, C.shape[-1]) # [bsz, num_heads, head_dim] ssm_states = cache_params.ssm_states[self.layer_idx].to(device=C.device, dtype=C.dtype) # Shape: [b, h, d, n] # Reshape ssm_states to merge the first two dimensions ssm_states_reshaped = 
ssm_states.view(batch_size * self.num_heads, self.head_dim, self.ssm_state_size) # Shape: [b*h, d, n] C_reshaped = C.view(batch_size * self.num_heads, self.ssm_state_size, 1) # Shape: [b*h, n, 1] y = torch.bmm(ssm_states_reshaped, C_reshaped) y = y.view(batch_size, self.num_heads, self.head_dim) # D skip connection # [num_heads] -> [num_heads, head_dim] D = self.D[..., None].expand(self.D.shape[0], self.head_dim) y = (y + hidden_states * D).to(y.dtype) # [bsz, num_heads, head_dim] -> [bsz, 1, intermediate_size] y = y.reshape(batch_size, -1)[:, None, ...] else: # begin ssd naive implementation without einsums dt = nn.functional.softplus(dt + self.dt_bias) dt = torch.clamp(dt, self.time_step_limit[0], self.time_step_limit[1]) hidden_states = hidden_states.reshape(batch_size, seq_len, -1, self.head_dim).float() B = B.reshape(batch_size, seq_len, -1, self.ssm_state_size).float() C = C.reshape(batch_size, seq_len, -1, self.ssm_state_size).float() B = B.repeat(1, 1, self.num_heads // self.n_groups, 1) C = C.repeat(1, 1, self.num_heads // self.n_groups, 1) pad_size = (self.chunk_size - seq_len % self.chunk_size) % self.chunk_size D_residual = self.D[..., None] * pad_tensor_by_size(hidden_states, pad_size) # Discretize x and A hidden_states = hidden_states * dt[..., None] A = A.to(hidden_states.dtype) * dt # Rearrange into blocks/chunks hidden_states, A, B, C = [reshape_into_chunks(t, pad_size, self.chunk_size) for t in (hidden_states, A, B, C)] # [bsz, -1, chunk_size, num_heads] -> [bsz, num_heads, -1, chunk_size] A = A.permute(0, 3, 1, 2) A_cumsum = torch.cumsum(A, dim=-1) # 1. Compute the output for each intra-chunk (diagonal blocks) # This is the analog of a causal mask L = torch.exp(segment_sum(A)) # Contraction of C and B to get G (attention-weights like) G_intermediate = C[:, :, :, None, :, :] * B[:, :, None, :, :, :] # shape: (b, c, l, s, h, n) G = G_intermediate.sum(dim=-1) # shape: (b, c, l, s, h) # Compute M, equivalent to applying attention mask to weights M_intermediate = G[..., None] * L.permute(0, 2, 3, 4, 1)[..., None] M = M_intermediate.sum(dim=-1) # Compute Y_diag (apply to values) Y_diag = (M[..., None] * hidden_states[:, :, None]).sum(dim=3) # 2. Compute the state for each intra-chunk # (right term of low-rank factorization of off-diagonal blocks; B terms) decay_states = torch.exp((A_cumsum[:, :, :, -1:] - A_cumsum)) B_decay = B * decay_states.permute(0, -2, -1, 1)[..., None] states = (B_decay[..., None, :] * hidden_states[..., None]).sum(dim=2) # 3. Compute the inter-chunk SSM recurrence; produces correct SSM states at chunk boundaries # (middle term of factorization of off-diag blocks; A terms) if use_precomputed_states: previous_states = cache_params.ssm_states[self.layer_idx][:, None, ...].to(device=states.device) else: previous_states = torch.zeros_like(states[:, :1]) states = torch.cat([previous_states, states], dim=1) decay_chunk = torch.exp(segment_sum(nn.functional.pad(A_cumsum[:, :, :, -1], (1, 0)))) decay_chunk = decay_chunk.transpose(1, 3) new_states = (decay_chunk[..., None, None] * states[:, :, None, ...]).sum(dim=1) states, ssm_state = new_states[:, :-1], new_states[:, -1] # 4. 
Compute state -> output conversion per chunk # (left term of low-rank factorization of off-diagonal blocks; C terms) state_decay_out = torch.exp(A_cumsum) C_times_states = (C[..., None, :] * states[:, :, None, ...]) state_decay_out_permuted = state_decay_out.permute(0, 2, 3, 1) Y_off = (C_times_states.sum(-1) * state_decay_out_permuted[..., None]) # Add output of intra-chunk and inter-chunk terms (diagonal and off-diagonal blocks) y = Y_diag + Y_off # [bsz, -1, self.chunk_size, num_heads, head_dim] -> [bsz, (padded) seq_len, num_heads, head_dim] y = y.reshape(batch_size, -1, self.num_heads, self.head_dim) y = y + D_residual # Cutting off padded chunks if pad_size > 0: y = y[:, :seq_len, :, :] y = y.reshape(batch_size, seq_len, -1) # Init cache if ssm_state is not None and cache_params is not None: cache_params.ssm_states[self.layer_idx].copy_(ssm_state) scan_output = self.norm(y, gate) # end ssd naive # 4. Final linear projection contextualized_states = self.out_proj(scan_output.to(dtype)) # [batch, seq_len, hidden_size] return contextualized_states # fmt: on def forward( self, hidden_states, cache_params: Optional[HybridMambaAttentionDynamicCache] = None, cache_position: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, ): if is_fast_path_available and "cuda" in self.in_proj.weight.device.type: return self.cuda_kernels_forward(hidden_states, cache_params, cache_position, attention_mask) dtype = hidden_states.dtype if attention_mask is not None and attention_mask.shape[1] > 1 and attention_mask.shape[0] > 1: # tune out hidden states for pad tokens, see https://github.com/state-spaces/mamba/issues/66 hidden_states = (hidden_states * attention_mask[:, :, None]).to(dtype) return self.torch_forward(hidden_states, cache_params, cache_position, attention_mask) class BambaMLP(nn.Module): def __init__(self, config): super().__init__() self.config = config self.hidden_size = config.hidden_size self.intermediate_size = config.intermediate_size self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias) self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias) self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.mlp_bias) self.act_fn = ACT2FN[config.hidden_act] def forward(self, x): down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x)) return down_proj class BambaRMSNorm(nn.Module): def __init__(self, hidden_size, eps=1e-6): """ BambaRMSNorm is equivalent to T5LayerNorm """ super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): input_dtype = hidden_states.dtype hidden_states = hidden_states.to(torch.float32) variance = hidden_states.pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) return self.weight * hidden_states.to(input_dtype) def extra_repr(self): return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}" class BambaDecoderLayer(nn.Module): def __init__(self, config: BambaConfig, layer_idx: int, layer_type: str = "mamba"): super().__init__() num_experts = 1 ffn_layer_class = BambaMLP if num_experts == 1 else None self.feed_forward = ffn_layer_class(config) self.input_layernorm = BambaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.pre_ff_layernorm = BambaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.layer_type = layer_type if layer_type == "mamba": self.mamba = BambaMixer(config=config, 
layer_idx=layer_idx) elif layer_type == "attention": self.self_attn = BambaAttention(config, layer_idx) else: raise ValueError("Invalid layer_type") def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[HybridMambaAttentionDynamicCache] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, cache_position: Optional[torch.LongTensor] = None, position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC **kwargs, ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch, sequence_length)` where padding elements are indicated by 0. past_key_value (`HybridMambaAttentionDynamicCache`, *optional*): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence. position_embeddings (`Tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*): Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`, with `head_dim` being the embedding dimension of each attention head. kwargs (`dict`, *optional*): Arbitrary kwargs to be ignored, used for FSDP and other methods that injects code into the model """ residual = hidden_states hidden_states = self.input_layernorm(hidden_states) # this is a hybrid decoder layer if self.layer_type == "mamba": hidden_states = self.mamba( hidden_states=hidden_states, cache_params=past_key_value, cache_position=cache_position, attention_mask=attention_mask, ) self_attn_weights = None elif self.layer_type == "attention": hidden_states, self_attn_weights = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs, ) # residual connection after attention hidden_states = residual + hidden_states # feed-forward residual = hidden_states hidden_states = self.pre_ff_layernorm(hidden_states) hidden_states = self.feed_forward(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) return outputs BAMBA_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`BambaConfig`]): Model configuration class with all the parameters of the model. 
Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ @add_start_docstrings( "The bare BambaModel outputting raw hidden-states without any specific head on top.", BAMBA_START_DOCSTRING, ) class BambaPreTrainedModel(PreTrainedModel): config_class = BambaConfig base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["BambaDecoderLayer"] _skip_keys_device_placement = "past_key_values" _supports_flash_attn_2 = True _supports_sdpa = True _supports_cache_class = True # Note: only supports HybridMambaAttentionDynamicCache _is_stateful = True def _init_weights(self, module): std = self.config.initializer_range if isinstance(module, (nn.Linear, nn.Conv1d)): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() BAMBA_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. If `past_key_values` is used, optionally only the last `input_ids` have to be input (see `past_key_values`). If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy. - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids) past_key_values (`HybridMambaAttentionDynamicCache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): A HybridMambaAttentionDynamicCache object containing pre-computed hidden-states (keys and values in the self-attention blocks and convolution and ssm states in the mamba blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. Key and value cache tensors have shape `(batch_size, num_heads, seq_len, head_dim)`. Convolution and ssm states tensors have shape `(batch_size, d_inner, d_conv)` and `(batch_size, d_inner, d_state)` respectively. See the `HybridMambaAttentionDynamicCache` class for more details. 
If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. output_router_logits (`bool`, *optional*): Whether or not to return the logits of all the routers. They are useful for computing the router loss, and should not be returned during inference. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`, this tensor is not affected by padding. It is used to update the cache in the correct position and to infer the complete sequence length. """ @add_start_docstrings( "The bare Bamba Model outputting raw hidden-states without any specific head on top.", BAMBA_START_DOCSTRING, ) # Adapted from transformers.models.jamba.modeling_jamba.JambaModel class BambaModel(BambaPreTrainedModel): """ Transformer decoder consisting of *config.num_hidden_layers* layers. 
Each layer is a [`BambaDecoderLayer`] Args: config: BambaConfig """ def __init__(self, config: BambaConfig): super().__init__(config) self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) decoder_layers = [] for i in range(config.num_hidden_layers): decoder_layers.append(BambaDecoderLayer(config, layer_idx=i, layer_type=config.layers_block_type[i])) self.layers = nn.ModuleList(decoder_layers) self._attn_implementation = config._attn_implementation self.final_layernorm = BambaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.rotary_emb = BambaRotaryEmbedding(config=config) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, value): self.embed_tokens = value @add_start_docstrings_to_model_forward(BAMBA_INPUTS_DOCSTRING) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[HybridMambaAttentionDynamicCache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs, # NOOP kwargs, for now ) -> Union[Tuple, BaseModelOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if self.gradient_checkpointing and self.training and use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`." ) use_cache = False if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) hidden_states = inputs_embeds if use_cache and past_key_values is None: logger.warning_once( "Bamba requires an initialized `HybridMambaAttentionDynamicCache` to return a cache. None was " "provided, so no cache will be returned." 
) if cache_position is None: cache_position = torch.arange(hidden_states.shape[1], device=hidden_states.device) if position_ids is None: position_ids = cache_position.unsqueeze(0) causal_mask = self._update_causal_mask( attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions ) mamba_mask = self._update_mamba_mask(attention_mask, cache_position) # create position embeddings to be shared across the decoder layers position_embeddings = self.rotary_emb(hidden_states, position_ids) all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None for decoder_layer in self.layers: # Depending on the layer type we opt for 2D base attention mask (Mamba) or 4D causal mask (Attention) layer_mask = mamba_mask if decoder_layer.layer_type == "mamba" else causal_mask if output_hidden_states: all_hidden_states += (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( decoder_layer.__call__, hidden_states, layer_mask, position_ids, past_key_values, output_attentions, use_cache, cache_position, position_embeddings, ) else: layer_outputs = decoder_layer( hidden_states, attention_mask=layer_mask, position_ids=position_ids, past_key_value=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, ) hidden_states = layer_outputs[0] if output_attentions: if layer_outputs[1] is not None: # append attentions only of attention layers. Mamba layers return `None` as the attention weights all_self_attns += (layer_outputs[1],) hidden_states = self.final_layernorm(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) if past_key_values and not past_key_values.has_previous_state: past_key_values.has_previous_state = True next_cache = None if not use_cache else past_key_values if not return_dict: return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, ) def _update_causal_mask( self, attention_mask: torch.Tensor, input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: HybridMambaAttentionDynamicCache, output_attentions: bool, ): if self.config._attn_implementation == "flash_attention_2": if attention_mask is not None and 0.0 in attention_mask: return attention_mask return None # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail # to infer the attention mask. 
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward if self.config._attn_implementation == "sdpa" and not output_attentions: if AttentionMaskConverter._ignore_causal_mask_sdpa( attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, is_training=self.training, ): return None dtype, device = input_tensor.dtype, input_tensor.device sequence_length = input_tensor.shape[1] target_length = ( attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1 ) # In case the provided `attention` mask is 2D, we generate a causal mask here (4D). causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position( attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, device=device, cache_position=cache_position, batch_size=input_tensor.shape[0], ) if ( self.config._attn_implementation == "sdpa" and attention_mask is not None and attention_mask.device.type in ["cuda", "xpu"] and not output_attentions ): # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path. # Details: https://github.com/pytorch/pytorch/issues/110213 min_dtype = torch.finfo(dtype).min causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype) return causal_mask @staticmethod def _prepare_4d_causal_attention_mask_with_cache_position( attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, device: torch.device, cache_position: torch.Tensor, batch_size: int, **kwargs, ): """ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing. Args: attention_mask (`torch.Tensor`): A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`. sequence_length (`int`): The sequence length being processed. target_length (`int`): The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet. dtype (`torch.dtype`): The dtype to use for the 4D attention mask. device (`torch.device`): The device to plcae the 4D attention mask on. cache_position (`torch.Tensor`): Indices depicting the position of the input sequence tokens in the sequence. batch_size (`torch.Tensor`): Batch size. """ if attention_mask is not None and attention_mask.dim() == 4: # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing. 
causal_mask = attention_mask else: min_dtype = torch.finfo(dtype).min causal_mask = torch.full( (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device ) if sequence_length != 1: causal_mask = torch.triu(causal_mask, diagonal=1) causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1) causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) if attention_mask is not None: causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit mask_length = attention_mask.shape[-1] padding_attention_mask = (attention_mask[:, None, None, :] == attention_mask[:, None, :, None])[ :, :, -sequence_length:, : ].to(dtype) padding_mask = causal_mask[:, :, :, :mask_length] + padding_attention_mask padding_mask = padding_mask == 0 causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill( padding_mask, min_dtype ) return causal_mask def _update_mamba_mask(self, attention_mask, cache_position): """ No need for zeroing states when 1. Cached forward 2. Attending to all inputs """ mamba_mask = attention_mask if cache_position[0] > 0 or (attention_mask is not None and torch.all(attention_mask == 1)): mamba_mask = None return mamba_mask class BambaForCausalLM(BambaPreTrainedModel, GenerationMixin): _tied_weights_keys = ["lm_head.weight"] _tp_plan = {"lm_head": "colwise_rep"} def __init__(self, config): super().__init__(config) self.model = BambaModel(config) self.vocab_size = config.vocab_size self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.model.embed_tokens def set_input_embeddings(self, value): self.model.embed_tokens = value def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def set_decoder(self, decoder): self.model = decoder def get_decoder(self): return self.model @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep") @add_start_docstrings_to_model_forward(BAMBA_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[HybridMambaAttentionDynamicCache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, logits_to_keep: Union[int, torch.Tensor] = 0, **kwargs, ) -> Union[Tuple, CausalLMOutputWithPast]: r""" Args: labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. logits_to_keep (`int` or `torch.Tensor`, *optional*): If an `int`, compute logits for the last `logits_to_keep` tokens. If `0`, calculate logits for all `input_ids` (special case). 
Only last token logits are needed for generation, and calculating them only for that token can save memory, which becomes pretty significant for long sequences or large vocabulary size. If a `torch.Tensor`, must be 1D corresponding to the indices to keep in the sequence length dimension. This is useful when using packed tensor format (single dimension for batch and sequence length). Returns: Example: ```python >>> from transformers import AutoTokenizer, BambaForCausalLM >>> model = BambaForCausalLM.from_pretrained("...") >>> tokenizer = AutoTokenizer.from_pretrained("...") >>> prompt = "Hey, are you conscious? Can you talk to me?" >>> inputs = tokenizer(prompt, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(inputs.input_ids, max_length=30) >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you." ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position, **kwargs, ) hidden_states = outputs[0] # Only compute necessary logits, and do not upcast them to float if we are not computing the loss slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep logits = self.lm_head(hidden_states[:, slice_indices, :]) loss = None if labels is not None: loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def prepare_inputs_for_generation( self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, cache_position=None, position_ids=None, use_cache=True, **kwargs, ): # Overwitten -- has a unique cache type, `HybridMambaAttentionDynamicCache` empty_past_kv = past_key_values is None # If we have cache: let's slice `input_ids` through `cache_position`, to keep only the unprocessed tokens # Exception 1: when passing input_embeds, input_ids may be missing entries # Exception 2: some generation methods do special slicing of input_ids, so we don't need to do it here # Exception 3: with synced GPUs cache_position may go out of bounds, but we only want dummy token in that case. 
# (we can't check exception 3 while compiling) if not empty_past_kv: if ( inputs_embeds is not None # Exception 1 or (is_torchdynamo_compiling() or cache_position[-1] >= input_ids.shape[1]) # Exception 3 ): input_ids = input_ids[:, -cache_position.shape[0] :] elif input_ids.shape[1] != cache_position.shape[0]: # Default case (the "else", a no op, is Exception 2) input_ids = input_ids[:, cache_position] else: past_key_values = HybridMambaAttentionDynamicCache( self.config, input_ids.shape[0], self.dtype, device=self.device ) if attention_mask is not None and position_ids is None: # create position_ids on the fly for batch generation position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) if not empty_past_kv: position_ids = position_ids[:, -input_ids.shape[1] :] # if `inputs_embeds` are passed, we only want to use them in the 1st generation step if inputs_embeds is not None and empty_past_kv: model_inputs = {"inputs_embeds": inputs_embeds} else: model_inputs = {"input_ids": input_ids.contiguous()} # `contiguous()` needed for compilation use cases model_inputs.update( { "position_ids": position_ids, "past_key_values": past_key_values, "use_cache": use_cache, "attention_mask": attention_mask, "logits_to_keep": self.config.num_logits_to_keep, "cache_position": cache_position, } ) return model_inputs __all__ = ["BambaModel", "BambaForCausalLM", "BambaPreTrainedModel"]
transformers/src/transformers/models/bamba/modeling_bamba.py/0
{ "file_path": "transformers/src/transformers/models/bamba/modeling_bamba.py", "repo_id": "transformers", "token_count": 34250 }
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert Huggingface Pytorch checkpoint to Tensorflow checkpoint.""" import argparse import os import numpy as np import tensorflow as tf import torch from transformers import BertModel def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str): """ Args: model: BertModel Pytorch model instance to be converted ckpt_dir: Tensorflow model directory model_name: model name Currently supported HF models: - Y BertModel - N BertForMaskedLM - N BertForPreTraining - N BertForMultipleChoice - N BertForNextSentencePrediction - N BertForSequenceClassification - N BertForQuestionAnswering """ tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value") var_map = ( ("layer.", "layer_"), ("word_embeddings.weight", "word_embeddings"), ("position_embeddings.weight", "position_embeddings"), ("token_type_embeddings.weight", "token_type_embeddings"), (".", "/"), ("LayerNorm/weight", "LayerNorm/gamma"), ("LayerNorm/bias", "LayerNorm/beta"), ("weight", "kernel"), ) if not os.path.isdir(ckpt_dir): os.makedirs(ckpt_dir) state_dict = model.state_dict() def to_tf_var_name(name: str): for patt, repl in iter(var_map): name = name.replace(patt, repl) return f"bert/{name}" def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session): tf_dtype = tf.dtypes.as_dtype(tensor.dtype) tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer()) session.run(tf.variables_initializer([tf_var])) session.run(tf_var) return tf_var tf.reset_default_graph() with tf.Session() as session: for var_name in state_dict: tf_name = to_tf_var_name(var_name) torch_tensor = state_dict[var_name].numpy() if any(x in var_name for x in tensors_to_transpose): torch_tensor = torch_tensor.T tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session) tf_var.assign(tf.cast(torch_tensor, tf_var.dtype)) tf_weight = session.run(tf_var) print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}") saver = tf.train.Saver(tf.trainable_variables()) saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt")) def main(raw_args=None): parser = argparse.ArgumentParser() parser.add_argument("--model_name", type=str, required=True, help="model name e.g. 
google-bert/bert-base-uncased") parser.add_argument( "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model" ) parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin") parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model") args = parser.parse_args(raw_args) model = BertModel.from_pretrained( pretrained_model_name_or_path=args.model_name, state_dict=torch.load(args.pytorch_model_path), cache_dir=args.cache_dir, ) convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name) if __name__ == "__main__": main()
transformers/src/transformers/models/bert/convert_bert_pytorch_checkpoint_to_original_tf.py/0
{ "file_path": "transformers/src/transformers/models/bert/convert_bert_pytorch_checkpoint_to_original_tf.py", "repo_id": "transformers", "token_count": 1660 }
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Convert BLIP-2 checkpoints from the original repository. URL: https://github.com/salesforce/LAVIS/tree/main/projects/blip2 """ import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install -U git+https://github.com/nielsrogge/LAVIS.git@blip2_float32 # to make sure we can compare both original and HF implementation in float32 from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BertTokenizer, Blip2Config, Blip2ForConditionalGeneration, Blip2ForImageTextRetrieval, Blip2Processor, Blip2QFormerConfig, Blip2VisionConfig, BlipImageProcessor, OPTConfig, T5Config, set_seed, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def load_demo_image(): url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png" image = Image.open(requests.get(url, stream=True).raw).convert("RGB") return image # here we list all keys to be renamed (original name on the left, our name on the right) def create_rename_keys(config, model_name): rename_keys = [] # fmt: off # vision encoder rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding")) rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding")) rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight")) rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias")) rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight")) rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias")) for i in range(config.vision_config.num_hidden_layers): rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight")) rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias")) rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight")) rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias")) rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight")) rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight",)) rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias")) rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight")) rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias")) 
rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight")) rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias")) # QFormer rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.layernorm.weight")) rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.layernorm.bias")) if "itm" in model_name: rename_keys.append(("Qformer.bert.embeddings.word_embeddings.weight", "embeddings.word_embeddings.weight")) rename_keys.append(("Qformer.bert.embeddings.position_embeddings.weight", "embeddings.position_embeddings.weight")) rename_keys.append(("vision_proj.weight", "vision_projection.weight")) rename_keys.append(("vision_proj.bias", "vision_projection.bias")) rename_keys.append(("text_proj.weight", "text_projection.weight")) rename_keys.append(("text_proj.bias", "text_projection.bias")) # fmt: on return rename_keys def rename_key(dct, old, new): val = dct.pop(old) dct[new] = val def read_in_q_v_bias(state_dict, config): for i in range(config.vision_config.num_hidden_layers): # read in original q and v biases q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias") v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias") # next, set bias in the state dict qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias)) state_dict[f"vision_model.encoder.layers.{i}.self_attn.qkv.bias"] = qkv_bias def get_blip2_config(model_name, eos_token_id): image_size = 364 if "coco" in model_name else 224 vision_config = Blip2VisionConfig(image_size=image_size).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "opt-2.7b" in model_name: text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict() elif "opt-6.7b" in model_name: text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict() elif "t5-xl" in model_name: text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict() elif "t5-xxl" in model_name: text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict() elif "itm" in model_name: text_config = {} else: raise ValueError("Model name not supported") if "itm" in model_name: config = Blip2Config( vision_config=vision_config, qformer_config=Blip2QFormerConfig(vocab_size=30523, use_qformer_text_input=True).to_dict(), ) else: config = Blip2Config(vision_config=vision_config, text_config=text_config) return config, image_size @torch.no_grad() def convert_blip2_checkpoint( model_name, pytorch_dump_folder_path=None, push_to_hub=False, lavis_device="cpu", hf_model_device="cpu" ): """ Copy/paste/tweak model's weights to Transformers design. 
""" if "opt" in model_name: tokenizer = AutoTokenizer.from_pretrained("facebook/opt-2.7b") elif "itm" in model_name: tokenizer = BertTokenizer.from_pretrained("bert-base-uncased", truncation_side="right") tokenizer.add_special_tokens({"bos_token": "[DEC]"}) else: tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-xl") if "itm" in model_name: eos_token_id = None else: eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0] config, image_size = get_blip2_config(model_name, eos_token_id=eos_token_id) if "itm" in model_name: hf_model = Blip2ForImageTextRetrieval(config).eval() else: hf_model = Blip2ForConditionalGeneration(config).eval() model_name_to_original = { "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"), "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"), "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"), "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"), "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"), "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"), "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"), "blip2-itm-vit-g": ("blip2_image_text_matching", "pretrain"), "blip2-itm-vit-g-coco": ("blip2_image_text_matching", "coco"), } name, type = model_name_to_original[model_name] # load original model print("Loading original model...") original_model, vis_processors, _ = load_model_and_preprocess( name=name, model_type=type, is_eval=True, device=lavis_device ) original_model.eval() print("Done!") # update state dict keys state_dict = original_model.state_dict() rename_keys = create_rename_keys(config, model_name) for src, dest in rename_keys: rename_key(state_dict, src, dest) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): val = state_dict.pop(key) if key.startswith("Qformer.bert"): key = key.replace("Qformer.bert", "qformer") if "attention.self" in key: key = key.replace("self", "attention") if "opt_proj" in key: key = key.replace("opt_proj", "language_projection") if "t5_proj" in key: key = key.replace("t5_proj", "language_projection") if key.startswith("opt"): key = key.replace("opt", "language") if key.startswith("t5"): key = key.replace("t5", "language") state_dict[key] = val # read in qv biases read_in_q_v_bias(state_dict, config) missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False) assert len(missing_keys) == 0 if "itm" in model_name: unexpected_keys = list(filter(lambda x: not x.startswith("Qformer.cls"), unexpected_keys)) assert unexpected_keys == ["temp", "qformer.embeddings.position_ids"] else: assert unexpected_keys == ["qformer.embeddings.position_ids"] image = load_demo_image() original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(lavis_device) # create processor image_processor = BlipImageProcessor( size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD ) processor = Blip2Processor(image_processor=image_processor, tokenizer=tokenizer) pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(hf_model_device) # make sure processor creates exact same pixel values assert torch.allclose(pixel_values, original_pixel_values.to(pixel_values.device)) original_model.to(lavis_device) hf_model.to(hf_model_device) if "itm" in model_name: caption = "a large fountain spewing water into the air" input_ids = tokenizer([caption], return_tensors="pt").input_ids.to(hf_model_device) attention_mask = processor(text=caption, return_tensors="pt").attention_mask.to(hf_model_device) 
with torch.no_grad(): original_logits = original_model( {"image": original_pixel_values, "text_input": [caption]}, match_head="itm" ) logits = hf_model( pixel_values=pixel_values, input_ids=input_ids, attention_mask=attention_mask, use_image_text_matching_head=True, ) assert original_logits.shape == logits.logits_per_image.shape print("First values of original logits:", original_logits[0, :3]) print("First values of HF logits:", logits.logits_per_image[0, :3]) # assert values # cast to same type target_dtype = logits.logits_per_image.dtype assert torch.allclose(original_logits.to(target_dtype), logits.logits_per_image, atol=1e-4) original_itm_scores = torch.nn.functional.softmax(original_logits, dim=1) itm_scores = torch.nn.functional.softmax(logits.logits_per_image, dim=1) assert torch.allclose(original_itm_scores.to(target_dtype), itm_scores, atol=1e-4) print("Looks ok!") with torch.no_grad(): original_logits = original_model( {"image": original_pixel_values, "text_input": [caption]}, match_head="itc" ) logits = hf_model( pixel_values=pixel_values, input_ids=input_ids, attention_mask=attention_mask, use_image_text_matching_head=False, ) assert original_logits.shape == logits.logits_per_image.shape print("First values of original logits:", original_logits[0, :3]) print("First values of HF logits:", logits.logits_per_image[0, :3]) # assert values # cast to same type target_dtype = logits.logits_per_image.dtype assert torch.allclose(original_logits.to(target_dtype), logits.logits_per_image, atol=1e-4) print("Looks ok!") else: input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(hf_model_device) with torch.no_grad(): if "opt" in model_name: original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits logits = hf_model(pixel_values, input_ids).logits else: original_logits = original_model( {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]} ).logits labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100) logits = hf_model(pixel_values, input_ids, labels=labels).logits assert original_logits.shape == logits.shape print("First values of original logits:", original_logits[0, :3, :3]) print("First values of HF logits:", logits[0, :3, :3]) # assert values assert torch.allclose(original_logits.to(logits.device), logits, atol=1e-4) print("Looks ok!") print("Generating a caption...") prompt = "Question: what object is in this image? 
Answer:" input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(hf_model_device) set_seed(42) original_outputs = original_model.generate( {"image": original_pixel_values, "prompt": prompt}, use_nucleus_sampling=True, max_length=50 ) outputs = hf_model.generate( pixel_values, input_ids, do_sample=True, num_beams=5, max_length=30, min_length=1, top_p=0.9, repetition_penalty=1.0, length_penalty=1.0, temperature=1, ) output_text = processor.batch_decode(outputs, skip_special_tokens=True) output_text = [text.strip() for text in output_text] print("Original generation:", original_outputs) print("HF generation:", output_text) if pytorch_dump_folder_path is not None: processor.save_pretrained(pytorch_dump_folder_path) hf_model.save_pretrained(pytorch_dump_folder_path) if push_to_hub: processor.push_to_hub(f"nielsr/{model_name}") hf_model.push_to_hub(f"nielsr/{model_name}") if __name__ == "__main__": parser = argparse.ArgumentParser() choices = [ "blip2-opt-2.7b", "blip2-opt-6.7b", "blip2-opt-2.7b-coco", "blip2-opt-6.7b-coco", "blip2-flan-t5-xl", "blip2-flan-t5-xl-coco", "blip2-flan-t5-xxl", "blip2-itm-vit-g", "blip2-itm-vit-g-coco", ] parser.add_argument( "--model_name", default="blip2-opt-2.7b", choices=choices, type=str, help="Path to hf config.json of model to convert", ) parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument( "--push_to_hub", action="store_true", help="Whether to push the model and processor to the hub after converting", ) # note: this script is tested on 2 GPUs, as models are compared in float32, # which requires quite some memory. Hence loading both on a # separate device is the easiest to compare parser.add_argument( "--lavis_device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda." ) parser.add_argument( "--hf_model_device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda." ) args = parser.parse_args() convert_blip2_checkpoint( args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.lavis_device, args.hf_model_device )
transformers/src/transformers/models/blip_2/convert_blip_2_original_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/blip_2/convert_blip_2_original_to_pytorch.py", "repo_id": "transformers", "token_count": 7314 }
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert Bros checkpoints.""" import argparse import bros # original repo import torch from transformers import BrosConfig, BrosModel, BrosProcessor from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) def get_configs(model_name): bros_config = BrosConfig.from_pretrained(model_name) return bros_config def remove_ignore_keys_(state_dict): ignore_keys = [ "embeddings.bbox_sinusoid_emb.inv_freq", ] for k in ignore_keys: state_dict.pop(k, None) def rename_key(name): if name == "embeddings.bbox_projection.weight": name = "bbox_embeddings.bbox_projection.weight" if name == "embeddings.bbox_sinusoid_emb.x_pos_emb.inv_freq": name = "bbox_embeddings.bbox_sinusoid_emb.x_pos_emb.inv_freq" if name == "embeddings.bbox_sinusoid_emb.y_pos_emb.inv_freq": name = "bbox_embeddings.bbox_sinusoid_emb.y_pos_emb.inv_freq" return name def convert_state_dict(orig_state_dict, model): # rename keys for key in orig_state_dict.copy().keys(): val = orig_state_dict.pop(key) orig_state_dict[rename_key(key)] = val # remove ignore keys remove_ignore_keys_(orig_state_dict) return orig_state_dict def convert_bros_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False): # load original model original_model = bros.BrosModel.from_pretrained(model_name).eval() # load HuggingFace Model bros_config = get_configs(model_name) model = BrosModel.from_pretrained(model_name, config=bros_config) model.eval() state_dict = original_model.state_dict() new_state_dict = convert_state_dict(state_dict, model) model.load_state_dict(new_state_dict) # verify results # original BROS model require 4 points (8 float values) for each bbox, prepare bbox with [batch_size, seq_len, 8] shape bbox = torch.tensor( [ [ [0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000], [0.4396, 0.6720, 0.4659, 0.6720, 0.4659, 0.6850, 0.4396, 0.6850], [0.4698, 0.6720, 0.4843, 0.6720, 0.4843, 0.6850, 0.4698, 0.6850], [0.4698, 0.6720, 0.4843, 0.6720, 0.4843, 0.6850, 0.4698, 0.6850], [0.2047, 0.6870, 0.2730, 0.6870, 0.2730, 0.7000, 0.2047, 0.7000], [0.2047, 0.6870, 0.2730, 0.6870, 0.2730, 0.7000, 0.2047, 0.7000], [1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000], ] ] ) processor = BrosProcessor.from_pretrained(model_name) encoding = processor("His name is Rocco.", return_tensors="pt") encoding["bbox"] = bbox original_hidden_states = original_model(**encoding).last_hidden_state # pixel_values = processor(image, return_tensors="pt").pixel_values last_hidden_states = model(**encoding).last_hidden_state assert torch.allclose(original_hidden_states, last_hidden_states, atol=1e-4) if pytorch_dump_folder_path is not None: print(f"Saving model and processor to {pytorch_dump_folder_path}") model.save_pretrained(pytorch_dump_folder_path) processor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: model.push_to_hub("jinho8345/" + model_name.split("/")[-1], commit_message="Update model") 
processor.push_to_hub("jinho8345/" + model_name.split("/")[-1], commit_message="Update model") if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="jinho8345/bros-base-uncased", required=False, type=str, help="Name of the original model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, required=False, type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model and processor to the 🤗 hub.", ) args = parser.parse_args() convert_bros_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
transformers/src/transformers/models/bros/convert_bros_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/bros/convert_bros_to_pytorch.py", "repo_id": "transformers", "token_count": 2040 }
# coding=utf-8 # Copyright Google AI and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for CANINE.""" from typing import Dict, List, Optional from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging logger = logging.get_logger(__name__) # Unicode defines 1,114,112 total “codepoints” UNICODE_VOCAB_SIZE = 1114112 # Below: Constants defining canonical codepoints for special, pseudo-characters. # Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py PAD = 0 CLS = 0xE000 SEP = 0xE001 BOS = 0xE002 MASK = 0xE003 RESERVED = 0xE004 # Maps special codepoints to human-readable names. SPECIAL_CODEPOINTS: Dict[int, str] = { # Special symbols are represented using codepoints values that are valid, # but designated as "Private Use", meaning that they will never be assigned # characters by the Unicode Consortium, and are thus safe for use here. # # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly # excluded and should fail with a hard error. CLS: "[CLS]", SEP: "[SEP]", BOS: "[BOS]", MASK: "[MASK]", PAD: "[PAD]", RESERVED: "[RESERVED]", } # Maps special codepoint human-readable names to their codepoint values. SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()} class CanineTokenizer(PreTrainedTokenizer): r""" Construct a CANINE tokenizer (i.e. a character splitter). It turns text into a sequence of characters, and then converts each character into its Unicode code point. [`CanineTokenizer`] inherits from [`PreTrainedTokenizer`]. Refer to superclass [`PreTrainedTokenizer`] for usage examples and documentation concerning parameters. Args: model_max_length (`int`, *optional*, defaults to 2048): The maximum sentence length the model accepts. """ def __init__( self, bos_token=chr(CLS), eos_token=chr(SEP), sep_token=chr(SEP), cls_token=chr(CLS), pad_token=chr(PAD), mask_token=chr(MASK), add_prefix_space=False, model_max_length=2048, **kwargs, ): bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token # Mask token behave like a normal word, i.e. include the space before it mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token # Creates a mapping for looking up the IDs of special symbols. 
self._special_codepoints: Dict[str, int] = {} for codepoint, name in SPECIAL_CODEPOINTS.items(): self._special_codepoints[name] = codepoint # Creates a mapping for looking up the string forms of special symbol IDs. self._special_codepoint_strings: Dict[int, str] = { codepoint: name for name, codepoint in self._special_codepoints.items() } self._unicode_vocab_size = UNICODE_VOCAB_SIZE self._num_special_tokens = len(self._special_codepoints) super().__init__( bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, model_max_length=model_max_length, **kwargs, ) @property def vocab_size(self) -> int: return self._unicode_vocab_size def get_vocab(self): vocab = {chr(i): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def _tokenize(self, text: str) -> List[str]: """Tokenize a string (i.e. perform character splitting).""" return list(text) def _convert_token_to_id(self, token: str) -> int: """Converts a token (i.e. a Unicode character) in an id (i.e. its integer Unicode code point value).""" try: return ord(token) except TypeError: raise ValueError(f"invalid token: '{token}'") def _convert_id_to_token(self, index: int) -> str: """ Converts a Unicode code point (integer) in a token (str). In case it's a special code point, convert to human-readable format. """ try: if index in SPECIAL_CODEPOINTS: return SPECIAL_CODEPOINTS[index] return chr(index) except TypeError: raise ValueError(f"invalid id: {index}") def convert_tokens_to_string(self, tokens): return "".join(tokens) def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A CANINE sequence has the following format: - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ sep = [self.sep_token_id] cls = [self.cls_token_id] result = cls + token_ids_0 + sep if token_ids_1 is not None: result += token_ids_1 + sep return result def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. 
""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) result = [1] + ([0] * len(token_ids_0)) + [1] if token_ids_1 is not None: result += ([0] * len(token_ids_1)) + [1] return result def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A CANINE sequence pair mask has the following format: ``` 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | ``` If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s). Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). """ sep = [self.sep_token_id] cls = [self.cls_token_id] result = len(cls + token_ids_0 + sep) * [0] if token_ids_1 is not None: result += len(token_ids_1 + sep) * [1] return result # CanineTokenizer has no vocab file def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None): return () __all__ = ["CanineTokenizer"]
transformers/src/transformers/models/canine/tokenization_canine.py/0
{ "file_path": "transformers/src/transformers/models/canine/tokenization_canine.py", "repo_id": "transformers", "token_count": 3904 }
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import re from laion_clap import CLAP_Module from transformers import AutoFeatureExtractor, ClapConfig, ClapModel KEYS_TO_MODIFY_MAPPING = { "text_branch": "text_model", "audio_branch": "audio_model.audio_encoder", "attn": "attention.self", "self.proj": "output.dense", "attention.self_mask": "attn_mask", "mlp.fc1": "intermediate.dense", "mlp.fc2": "output.dense", "norm1": "layernorm_before", "norm2": "layernorm_after", "bn0": "batch_norm", } processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc") def init_clap(checkpoint_path, model_type, enable_fusion=False): model = CLAP_Module( amodel=model_type, enable_fusion=enable_fusion, ) model.load_ckpt(checkpoint_path) return model def get_config_from_original(clap_model): audio_config = { "patch_embeds_hidden_size": clap_model.model.audio_branch.embed_dim, "depths": clap_model.model.audio_branch.depths, "hidden_size": clap_model.model.audio_projection[0].in_features, } text_config = {"hidden_size": clap_model.model.text_branch.pooler.dense.in_features} return ClapConfig(audio_config=audio_config, text_config=text_config) def rename_state_dict(state_dict): model_state_dict = {} sequential_layers_pattern = r".*sequential.(\d+).*" text_projection_pattern = r".*_projection.(\d+).*" for key, value in state_dict.items(): # check if any key needs to be modified for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: key = key.replace(key_to_modify, new_key) if re.match(sequential_layers_pattern, key): # replace sequential layers with list sequential_layer = re.match(sequential_layers_pattern, key).group(1) key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.") elif re.match(text_projection_pattern, key): projecton_layer = int(re.match(text_projection_pattern, key).group(1)) # Because in CLAP they use `nn.Sequential`... 
            transformers_projection_layer = 1 if projecton_layer == 0 else 2

            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")

        if "audio" in key and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3

            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]

            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value

    return model_state_dict


def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, model_type, enable_fusion=False):
    clap_model = init_clap(checkpoint_path, model_type, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = get_config_from_original(clap_model)
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
    parser.add_argument("--model_type", default="HTSAT-tiny", type=str, help="Type of the HTSAT audio encoder used in the original checkpoint")
    args = parser.parse_args()

    convert_clap_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.model_type, args.enable_fusion
    )
transformers/src/transformers/models/clap/convert_clap_original_pytorch_to_hf.py/0
{ "file_path": "transformers/src/transformers/models/clap/convert_clap_original_pytorch_to_hf.py", "repo_id": "transformers", "token_count": 2042 }
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Convert ColPali weights from the original repository to the HF model format. Original repository: https://github.com/illuin-tech/colpali. NOTE: This script was originally run using `torch==2.5.1` and with: ```bash python src/transformers/models/colpali/convert_colpali_weights_to_hf.py \ --model_id vidore/colpali-v1.2-merged \ --revision 89fd9736194236a1ecb7a9ec9b04f537f6f896af \ --original_vlm_name_or_path google/paligemma-3b-mix-448 \ --output_dir vidore/colpali-v1.2-hf-internal \ --push_to_hub python src/transformers/models/colpali/convert_colpali_weights_to_hf.py \ --model_id vidore/colpali-v1.3-merged \ --revision 5b955e3415a7c5468ab33119d98d6d45c3a5b2c3 \ --original_vlm_name_or_path google/paligemma-3b-mix-448 \ --output_dir vidore/colpali-v1.3-hf \ --push_to_hub ``` """ import argparse import glob from pathlib import Path from typing import Any, Dict, Optional import torch from huggingface_hub import snapshot_download from safetensors import safe_open from transformers import AutoConfig from transformers.models.colpali import ColPaliForRetrieval from transformers.models.colpali.configuration_colpali import ColPaliConfig from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) ORIGINAL_DTYPE = torch.bfloat16 def rename_state_dict_keys(state_dict: Dict[str, Any]) -> Dict[str, Any]: new_state_dict = {} for key, value in state_dict.items(): new_key = key if key.startswith("custom_text_proj"): new_key = key.replace("custom_text_proj", "embedding_proj_layer") if key.startswith("model."): new_key = key.replace("model.", "vlm.", 1) new_state_dict[new_key] = value return new_state_dict def load_original_state_dict(model_id: str, revision: Optional[str] = None) -> Dict[str, torch.Tensor]: directory_path = snapshot_download( repo_id=model_id, revision=revision, allow_patterns=["*.safetensors"], ) original_state_dict = {} for path in glob.glob(f"{directory_path}/*"): if path.endswith(".safetensors"): with safe_open(path, framework="pt", device="cpu") as f: for key in f.keys(): original_state_dict[key] = f.get_tensor(key) # Some weights are tied, so `lm.head`` is not saved. Let's clone to load state dict. 
if "lm_head.weight" not in original_state_dict: original_state_dict["vlm.language_model.lm_head.weight"] = original_state_dict[ "model.language_model.model.embed_tokens.weight" ].clone() return original_state_dict @torch.no_grad() def convert_colpali_weights_to_hf( model_id: str, output_dir: str, push_to_hub: bool, revision: Optional[str] = None, original_vlm_name_or_path: Optional[str] = None, ): # Load the original model data original_config = AutoConfig.from_pretrained( model_id, revision=revision, ) if original_vlm_name_or_path is not None: original_config._name_or_path = original_vlm_name_or_path if hasattr(original_config, "architectures"): delattr(original_config, "architectures") original_state_dict = load_original_state_dict(model_id, revision=revision) # Format the state_dict keys original_state_dict = rename_state_dict_keys(original_state_dict) # Create the new config config = ColPaliConfig( vlm_config=original_config, embedding_dim=128, # hardcoded in the original model ) config.model_type = "colpali" config.is_composition = False # Load the untrained model model = ColPaliForRetrieval(config=config).to("cpu").eval() print("Created model with new config and randomly initialized weights") # NOTE: The model was initialized with float32 weights. We need to convert it to the desired precision. # There are two ways to set the model's dtype: # - Using `model.from_pretrained(..., torch_dtype=dtype_precision)` doesn't convert the hyperparameters to the desired precision. # - Using `model.to(dtype_precision)` converts all values - including the hyperparameters - to the desired precision. # The following snippet allows a fine-grained control over the model's dtype, making sure that all # the new weights' dtypes match the original model. for param in model.parameters(): param.data = param.data.to(ORIGINAL_DTYPE) print(f"Converted the new model weights to `{ORIGINAL_DTYPE}`") # Load the original weights model.load_state_dict(original_state_dict) print("Loaded original model weights") # Tie the weights (following ColPali's `__init__`` step) if model.vlm.language_model._tied_weights_keys is not None: model._tied_weights_keys = [f"vlm.language_model.{k}" for k in model.vlm.language_model._tied_weights_keys] # Sanity check: ensure all keys are the same state_dict_keys_old = set(original_state_dict.keys()) state_dict_keys_new = set(model.state_dict().keys()) disjoint_keys = state_dict_keys_old.symmetric_difference(state_dict_keys_new) if disjoint_keys: raise ValueError(f"Incompatible keys: {disjoint_keys}") # Save the model if push_to_hub: model.push_to_hub(output_dir, private=True) print(f"Model pushed to the hub at `{output_dir}`") else: Path(output_dir).mkdir(exist_ok=True, parents=True) model.save_pretrained(output_dir) print(f"Model saved to `{output_dir}`") if __name__ == "__main__": parser = argparse.ArgumentParser( description=""" This script converts the original ColPali model to the HF model format. 
Example usage: ```bash python src/transformers/models/colpali/convert_colpali_weights_to_hf.py \ --model_id vidore/colpali-v1.2-merged \ --revision 89fd9736194236a1ecb7a9ec9b04f537f6f896af \ --original_vlm_name_or_path google/paligemma-3b-mix-448 \ --output_dir vidore/colpali-v1.2-hf \ --push_to_hub ``` """ ) parser.add_argument( "--model_id", help="Model ID of the original model to convert", ) parser.add_argument( "--output_dir", help="Location to write HF model and tokenizer", ) parser.add_argument( "--push_to_hub", help="Whether or not to push the model to the hub at `output_dir` instead of saving it locally", action="store_true", default=False, ) parser.add_argument( "--revision", help="Revision of the model to download", default=None, ) parser.add_argument( "--original_vlm_name_or_path", help="Name or path of the original VLM backbone model", default=None, ) args = parser.parse_args() convert_colpali_weights_to_hf( model_id=args.model_id, output_dir=args.output_dir, push_to_hub=args.push_to_hub, revision=args.revision, original_vlm_name_or_path=args.original_vlm_name_or_path, )
transformers/src/transformers/models/colpali/convert_colpali_weights_to_hf.py/0
{ "file_path": "transformers/src/transformers/models/colpali/convert_colpali_weights_to_hf.py", "repo_id": "transformers", "token_count": 3091 }
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """DAB-DETR model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import verify_backbone_config_arguments from ..auto import CONFIG_MAPPING logger = logging.get_logger(__name__) class DabDetrConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`DabDetrModel`]. It is used to instantiate a DAB-DETR model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the DAB-DETR [IDEA-Research/dab_detr-base](https://huggingface.co/IDEA-Research/dab_detr-base) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: use_timm_backbone (`bool`, *optional*, defaults to `True`): Whether or not to use the `timm` library for the backbone. If set to `False`, will use the [`AutoBackbone`] API. backbone_config (`PretrainedConfig` or `dict`, *optional*): The configuration of the backbone model. Only used in case `use_timm_backbone` is set to `False` in which case it will default to `ResNetConfig()`. backbone (`str`, *optional*, defaults to `"resnet50"`): Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone` is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights. use_pretrained_backbone (`bool`, *optional*, defaults to `True`): Whether to use pretrained weights for the backbone. backbone_kwargs (`dict`, *optional*): Keyword arguments to be passed to AutoBackbone when loading from a checkpoint e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set. num_queries (`int`, *optional*, defaults to 300): Number of object queries, i.e. detection slots. This is the maximal number of objects [`DabDetrModel`] can detect in a single image. For COCO, we recommend 100 queries. encoder_layers (`int`, *optional*, defaults to 6): Number of encoder layers. encoder_ffn_dim (`int`, *optional*, defaults to 2048): Dimension of the "intermediate" (often named feed-forward) layer in encoder. encoder_attention_heads (`int`, *optional*, defaults to 8): Number of attention heads for each attention layer in the Transformer encoder. decoder_layers (`int`, *optional*, defaults to 6): Number of decoder layers. decoder_ffn_dim (`int`, *optional*, defaults to 2048): Dimension of the "intermediate" (often named feed-forward) layer in decoder. decoder_attention_heads (`int`, *optional*, defaults to 8): Number of attention heads for each attention layer in the Transformer decoder. 
is_encoder_decoder (`bool`, *optional*, defaults to `True`): Indicates whether the transformer model architecture is an encoder-decoder or not. activation_function (`str` or `function`, *optional*, defaults to `"prelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. hidden_size (`int`, *optional*, defaults to 256): This parameter is a general dimension parameter, defining dimensions for components such as the encoder layer and projection parameters in the decoder layer, among others. dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. activation_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. init_xavier_std (`float`, *optional*, defaults to 1.0): The scaling factor used for the Xavier initialization gain in the HM Attention map module. auxiliary_loss (`bool`, *optional*, defaults to `False`): Whether auxiliary decoding losses (loss at each decoder layer) are to be used. dilation (`bool`, *optional*, defaults to `False`): Whether to replace stride with dilation in the last convolutional block (DC5). Only supported when `use_timm_backbone` = `True`. class_cost (`float`, *optional*, defaults to 2): Relative weight of the classification error in the Hungarian matching cost. bbox_cost (`float`, *optional*, defaults to 5): Relative weight of the L1 error of the bounding box coordinates in the Hungarian matching cost. giou_cost (`float`, *optional*, defaults to 2): Relative weight of the generalized IoU loss of the bounding box in the Hungarian matching cost. cls_loss_coefficient (`float`, *optional*, defaults to 2): Relative weight of the classification loss in the object detection loss function. bbox_loss_coefficient (`float`, *optional*, defaults to 5): Relative weight of the L1 bounding box loss in the object detection loss. giou_loss_coefficient (`float`, *optional*, defaults to 2): Relative weight of the generalized IoU loss in the object detection loss. focal_alpha (`float`, *optional*, defaults to 0.25): Alpha parameter in the focal loss. temperature_height (`int`, *optional*, defaults to 20): Temperature parameter to tune the flatness of positional attention (HEIGHT) temperature_width (`int`, *optional*, defaults to 20): Temperature parameter to tune the flatness of positional attention (WIDTH) query_dim (`int`, *optional*, defaults to 4): Query dimension parameter represents the size of the output vector. random_refpoints_xy (`bool`, *optional*, defaults to `False`): Whether to fix the x and y coordinates of the anchor boxes with random initialization. keep_query_pos (`bool`, *optional*, defaults to `False`): Whether to concatenate the projected positional embedding from the object query into the original query (key) in every decoder layer. num_patterns (`int`, *optional*, defaults to 0): Number of pattern embeddings. normalize_before (`bool`, *optional*, defaults to `False`): Whether we use a normalization layer in the Encoder or not. sine_position_embedding_scale (`float`, *optional*, defaults to 'None'): Scaling factor applied to the normalized positional encodings. 
initializer_bias_prior_prob (`float`, *optional*): The prior probability used by the bias initializer to initialize biases for `enc_score_head` and `class_embed`. If `None`, `prior_prob` computed as `prior_prob = 1 / (num_labels + 1)` while initializing model weights. Examples: ```python >>> from transformers import DabDetrConfig, DabDetrModel >>> # Initializing a DAB-DETR IDEA-Research/dab_detr-base style configuration >>> configuration = DabDetrConfig() >>> # Initializing a model (with random weights) from the IDEA-Research/dab_detr-base style configuration >>> model = DabDetrModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "dab-detr" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = { "num_attention_heads": "encoder_attention_heads", } def __init__( self, use_timm_backbone=True, backbone_config=None, backbone="resnet50", use_pretrained_backbone=True, backbone_kwargs=None, num_queries=300, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=8, is_encoder_decoder=True, activation_function="prelu", hidden_size=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, auxiliary_loss=False, dilation=False, class_cost=2, bbox_cost=5, giou_cost=2, cls_loss_coefficient=2, bbox_loss_coefficient=5, giou_loss_coefficient=2, focal_alpha=0.25, temperature_height=20, temperature_width=20, query_dim=4, random_refpoints_xy=False, keep_query_pos=False, num_patterns=0, normalize_before=False, sine_position_embedding_scale=None, initializer_bias_prior_prob=None, **kwargs, ): if query_dim != 4: raise ValueError("The query dimensions has to be 4.") # We default to values which were previously hard-coded in the model. This enables configurability of the config # while keeping the default behavior the same. if use_timm_backbone and backbone_kwargs is None: backbone_kwargs = {} if dilation: backbone_kwargs["output_stride"] = 16 backbone_kwargs["out_indices"] = [1, 2, 3, 4] backbone_kwargs["in_chans"] = 3 # num_channels # Backwards compatibility elif not use_timm_backbone and backbone in (None, "resnet50"): if backbone_config is None: logger.info("`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.") backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"]) elif isinstance(backbone_config, dict): backbone_model_type = backbone_config.get("model_type") config_class = CONFIG_MAPPING[backbone_model_type] backbone_config = config_class.from_dict(backbone_config) backbone = None # set timm attributes to None dilation = None verify_backbone_config_arguments( use_timm_backbone=use_timm_backbone, use_pretrained_backbone=use_pretrained_backbone, backbone=backbone, backbone_config=backbone_config, backbone_kwargs=backbone_kwargs, ) self.use_timm_backbone = use_timm_backbone self.backbone_config = backbone_config self.num_queries = num_queries self.hidden_size = hidden_size self.encoder_ffn_dim = encoder_ffn_dim self.encoder_layers = encoder_layers self.encoder_attention_heads = encoder_attention_heads self.decoder_ffn_dim = decoder_ffn_dim self.decoder_layers = decoder_layers self.decoder_attention_heads = decoder_attention_heads self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.activation_function = activation_function self.init_std = init_std self.init_xavier_std = init_xavier_std self.num_hidden_layers = encoder_layers self.auxiliary_loss = auxiliary_loss self.backbone = backbone self.use_pretrained_backbone = use_pretrained_backbone self.backbone_kwargs = backbone_kwargs # Hungarian matcher self.class_cost = class_cost self.bbox_cost = bbox_cost self.giou_cost = giou_cost # Loss coefficients self.cls_loss_coefficient = cls_loss_coefficient self.bbox_loss_coefficient = bbox_loss_coefficient self.giou_loss_coefficient = giou_loss_coefficient self.focal_alpha = focal_alpha self.query_dim = query_dim self.random_refpoints_xy = random_refpoints_xy self.keep_query_pos = keep_query_pos self.num_patterns = num_patterns self.normalize_before = normalize_before self.temperature_width = temperature_width self.temperature_height = temperature_height self.sine_position_embedding_scale = sine_position_embedding_scale self.initializer_bias_prior_prob = initializer_bias_prior_prob super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs) __all__ = ["DabDetrConfig"]
transformers/src/transformers/models/dab_detr/configuration_dab_detr.py/0
{ "file_path": "transformers/src/transformers/models/dab_detr/configuration_dab_detr.py", "repo_id": "transformers", "token_count": 5207 }
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch Data2VecText model.""" import math from typing import List, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN, gelu from ...generation import GenerationMixin from ...modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_data2vec_text import Data2VecTextConfig logger = logging.get_logger(__name__) _HIDDEN_STATES_START_POSITION = 2 # General docstring _CHECKPOINT_FOR_DOC = "facebook/data2vec-text-base" _CONFIG_FOR_DOC = "Data2VecTextConfig" # Copied from transformers.models.roberta.modeling_roberta.RobertaEmbeddings with Roberta->Data2VecText class Data2VecTextForTextEmbeddings(nn.Module): """ Same as BertEmbeddings with a tiny tweak for positional embeddings indexing. """ # Copied from transformers.models.bert.modeling_bert.BertEmbeddings.__init__ def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") self.register_buffer( "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False ) self.register_buffer( "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False ) # End copy self.padding_idx = config.pad_token_id self.position_embeddings = nn.Embedding( config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx ) def forward( self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0 ): if position_ids is None: if input_ids is not None: # Create the position ids from the input token ids. Any padded tokens remain padded. 
position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length) else: position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds) if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves # issue #5664 if token_type_ids is None: if hasattr(self, "token_type_ids"): buffered_token_type_ids = self.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings if self.position_embedding_type == "absolute": position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings def create_position_ids_from_inputs_embeds(self, inputs_embeds): """ We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids. Args: inputs_embeds: torch.Tensor Returns: torch.Tensor """ input_shape = inputs_embeds.size()[:-1] sequence_length = input_shape[1] position_ids = torch.arange( self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device ) return position_ids.unsqueeze(0).expand(input_shape) # Copied from transformers.models.roberta.modeling_roberta.RobertaSelfAttention with Roberta->Data2VecText class Data2VecTextSelfAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = position_embedding_type or getattr( config, "position_embedding_type", "absolute" ) if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) self.is_decoder = config.is_decoder def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: 
Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: mixed_query_layer = self.query(hidden_states) # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) use_cache = past_key_value is not None if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_layer, value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. 
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": query_length, key_length = query_layer.shape[2], key_layer.shape[2] if use_cache: position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view( -1, 1 ) else: position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility if self.position_embedding_type == "relative_key": relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == "relative_key_query": relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in Data2VecTextModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs # Copied from transformers.models.bert.modeling_bert.BertSelfOutput class Data2VecTextSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states DATA2VEC_TEXT_SELF_ATTENTION_CLASSES = { "eager": Data2VecTextSelfAttention, } # Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->Data2VecText,BERT->DATA2VEC_TEXT class Data2VecTextAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() self.self = DATA2VEC_TEXT_SELF_ATTENTION_CLASSES[config._attn_implementation]( config, position_embedding_type=position_embedding_type ) self.output = Data2VecTextSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: self_outputs = self.self( hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs # Copied from transformers.models.bert.modeling_bert.BertIntermediate class Data2VecTextIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> 
torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOutput class Data2VecTextOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->Data2VecText class Data2VecTextLayer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = Data2VecTextAttention(config) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise ValueError(f"{self} should be used as a decoder model if cross attention is added") self.crossattention = Data2VecTextAttention(config, position_embedding_type="absolute") self.intermediate = Data2VecTextIntermediate(config) self.output = Data2VecTextOutput(config) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention( hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value, ) attention_output = self_attention_outputs[0] # if decoder, the last output is tuple of self-attn cache if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] # add self attentions if we output attention weights cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, "crossattention"): raise ValueError( f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers" " by setting `config.add_cross_attention=True`" ) # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention( attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, cross_attn_past_key_value, output_attentions, ) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights # add cross-attn cache to positions 3,4 of present_key_value tuple cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value layer_output = apply_chunking_to_forward( 
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) outputs = (layer_output,) + outputs # if decoder, return the attn key/values as the last output if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output # Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->Data2VecText class Data2VecTextEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([Data2VecTextLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = False, output_hidden_states: Optional[bool] = False, return_dict: Optional[bool] = True, ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False next_decoder_cache = () if use_cache else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer_module.__call__, hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) else: layer_outputs = layer_module( hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions, ) # Copied from transformers.models.bert.modeling_bert.BertPooler class Data2VecTextPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: # 
We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class Data2VecTextPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = Data2VecTextConfig base_model_prefix = "data2vec_text" supports_gradient_checkpointing = True _no_split_modules = ["Data2VecTextForTextEmbeddings", "Data2VecTextLayer"] def _init_weights(self, module): """Initialize the weights""" if isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): if hasattr(module, "bias") and module.bias is not None: module.bias.data.zero_() if hasattr(module, "weight") and module.weight is not None: module.weight.data.fill_(1.0) DATA2VECTEXT_START_DOCSTRING = r""" Data2VecText was proposed in [data2vec: A General Framework for Self-supervised Learning in Speech, Vision and Language](https://arxiv.org/pdf/2202.03555) by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu and Michael Auli. This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`Data2VecTextConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ DATA2VECTEXT_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`torch.LongTensor` of shape `({0})`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare Data2VecText Model for text transformer outputting raw hidden-states without any specific head on top.", DATA2VECTEXT_START_DOCSTRING, ) class Data2VecTextModel(Data2VecTextPreTrainedModel): """ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in *Attention is all you need*_ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. To behave as an decoder the model needs to be initialized with the `is_decoder` argument of the configuration set to `True`. To be used in a Seq2Seq model, the model needs to initialized with both `is_decoder` argument and `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass. .. _*Attention is all you need*: https://arxiv.org/abs/1706.03762 """ def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config self.embeddings = Data2VecTextForTextEmbeddings(config) self.encoder = Data2VecTextEncoder(config) self.pooler = Data2VecTextPooler(config) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(DATA2VECTEXT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC, ) # Copied from transformers.models.clap.modeling_clap.ClapTextModel.forward def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]: r""" encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.config.is_decoder: use_cache = use_cache if use_cache is not None else self.config.use_cache else: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") batch_size, seq_length = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device # past_key_values_length past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if attention_mask is None: attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device) if token_type_ids is None: if hasattr(self.embeddings, "token_type_ids"): buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.config.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length, ) encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: 
return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ) @add_start_docstrings( """Data2VecText Model with a `language modeling` head on top for CLM fine-tuning.""", DATA2VECTEXT_START_DOCSTRING ) class Data2VecTextForCausalLM(Data2VecTextPreTrainedModel, GenerationMixin): _tied_weights_keys = ["lm_head.decoder.weight", "lm_head.decoder.bias"] def __init__(self, config): super().__init__(config) if not config.is_decoder: logger.warning("If you want to use `Data2VecTextLMHeadModel` as a standalone, add `is_decoder=True.`") self.data2vec_text = Data2VecTextModel(config, add_pooling_layer=False) self.lm_head = Data2VecTextLMHead(config) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.lm_head.decoder def set_output_embeddings(self, new_embeddings): self.lm_head.decoder = new_embeddings @add_start_docstrings_to_model_forward(DATA2VECTEXT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]: r""" encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. 
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). Returns: Example: ```python >>> from transformers import AutoTokenizer, Data2VecTextForCausalLM, Data2VecTextConfig >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("facebook/data2vec-text-base") >>> config = Data2VecTextConfig.from_pretrained("facebook/data2vec-text-base") >>> config.is_decoder = True >>> model = Data2VecTextForCausalLM.from_pretrained("facebook/data2vec-text-base", config=config) >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> prediction_logits = outputs.logits ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: use_cache = False outputs = self.data2vec_text( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] prediction_scores = self.lm_head(sequence_output) lm_loss = None if labels is not None: lm_loss = self.loss_function( prediction_scores, labels, vocab_size=self.config.vocab_size, **kwargs, ) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((lm_loss,) + output) if lm_loss is not None else output return CausalLMOutputWithCrossAttentions( loss=lm_loss, logits=prediction_scores, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) def _reorder_cache(self, past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += ( tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), ) return reordered_past @add_start_docstrings("""data2vec Model with a `language modeling` head on top.""", DATA2VECTEXT_START_DOCSTRING) class Data2VecTextForMaskedLM(Data2VecTextPreTrainedModel): _tied_weights_keys = ["lm_head.decoder.weight", "lm_head.decoder.bias"] def __init__(self, config): super().__init__(config) if config.is_decoder: logger.warning( "If you want to use `Data2VecTextForMaskedLM` make sure `config.is_decoder=False` for " "bi-directional self-attention." 
) self.data2vec_text = Data2VecTextModel(config, add_pooling_layer=False) self.lm_head = Data2VecTextLMHead(config) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.lm_head.decoder def set_output_embeddings(self, new_embeddings): self.lm_head.decoder = new_embeddings @add_start_docstrings_to_model_forward(DATA2VECTEXT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC, mask="<mask>", ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, MaskedLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` kwargs (`Dict[str, any]`, *optional*, defaults to *{}*): Used to hide legacy arguments that have been deprecated. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.data2vec_text( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] prediction_scores = self.lm_head(sequence_output) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() labels = labels.to(prediction_scores.device) masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return MaskedLMOutput( loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) # Copied from transformers.models.roberta.modeling_roberta.RobertaLMHead with Roberta->Data2VecText class Data2VecTextLMHead(nn.Module): """Data2VecText Head for masked language modeling.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.decoder = nn.Linear(config.hidden_size, config.vocab_size) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) self.decoder.bias = self.bias def forward(self, features, **kwargs): x = self.dense(features) x = gelu(x) x = self.layer_norm(x) # project back to size of vocabulary with bias x = self.decoder(x) return x def _tie_weights(self): # To tie those two weights if they get disconnected (on TPU or when the bias is resized) 
# For accelerate compatibility and to not break backward compatibility if self.decoder.bias.device.type == "meta": self.decoder.bias = self.bias else: self.bias = self.decoder.bias @add_start_docstrings( """ Data2VecText Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """, DATA2VECTEXT_START_DOCSTRING, ) class Data2VecTextForSequenceClassification(Data2VecTextPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.config = config self.data2vec_text = Data2VecTextModel(config, add_pooling_layer=False) self.classifier = Data2VecTextClassificationHead(config) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(DATA2VECTEXT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, SequenceClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.data2vec_text( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.classifier(sequence_output) loss = None if labels is not None: labels = labels.to(logits.device) if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ Data2VecText Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """, DATA2VECTEXT_START_DOCSTRING, ) class Data2VecTextForMultipleChoice(Data2VecTextPreTrainedModel): def __init__(self, config): super().__init__(config) self.data2vec_text = Data2VecTextModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward( DATA2VECTEXT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length") ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, MultipleChoiceModelOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. 
(See `input_ids` above) """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None flat_inputs_embeds = ( inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None ) outputs = self.data2vec_text( flat_input_ids, position_ids=flat_position_ids, token_type_ids=flat_token_type_ids, attention_mask=flat_attention_mask, head_mask=head_mask, inputs_embeds=flat_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() labels = labels.to(reshaped_logits.device) loss = loss_fct(reshaped_logits, labels) if not return_dict: output = (reshaped_logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return MultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ Data2VecText Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """, DATA2VECTEXT_START_DOCSTRING, ) class Data2VecTextForTokenClassification(Data2VecTextPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.data2vec_text = Data2VecTextModel(config, add_pooling_layer=False) classifier_dropout = ( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(DATA2VECTEXT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, TokenClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.data2vec_text( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() labels = labels.to(logits.device) loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) # Copied from transformers.models.roberta.modeling_roberta.RobertaClassificationHead with Roberta->Data2VecText class Data2VecTextClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) classifier_dropout = ( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.dropout = nn.Dropout(classifier_dropout) self.out_proj = nn.Linear(config.hidden_size, config.num_labels) def forward(self, features, **kwargs): x = features[:, 0, :] # take <s> token (equiv. to [CLS]) x = self.dropout(x) x = self.dense(x) x = torch.tanh(x) x = self.dropout(x) x = self.out_proj(x) return x @add_start_docstrings( """ Data2VecText Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). """, DATA2VECTEXT_START_DOCSTRING, ) class Data2VecTextForQuestionAnswering(Data2VecTextPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.data2vec_text = Data2VecTextModel(config, add_pooling_layer=False) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(DATA2VECTEXT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, start_positions: Optional[torch.LongTensor] = None, end_positions: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, QuestionAnsweringModelOutput]: r""" start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. 
end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.data2vec_text( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[2:] return ((total_loss,) + output) if total_loss is not None else output return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0): """ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols are ignored. This is modified from fairseq's `utils.make_positions`. Args: x: torch.Tensor x: Returns: torch.Tensor """ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA. mask = input_ids.ne(padding_idx).int() incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask return incremental_indices.long() + padding_idx __all__ = [ "Data2VecTextForCausalLM", "Data2VecTextForMaskedLM", "Data2VecTextForMultipleChoice", "Data2VecTextForQuestionAnswering", "Data2VecTextForSequenceClassification", "Data2VecTextForTokenClassification", "Data2VecTextModel", "Data2VecTextPreTrainedModel", ]
transformers/src/transformers/models/data2vec/modeling_data2vec_text.py/0
{ "file_path": "transformers/src/transformers/models/data2vec/modeling_data2vec_text.py", "repo_id": "transformers", "token_count": 29920 }
# coding=utf-8 # Copyright 2020 Microsoft and the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization class for model DeBERTa.""" import os import unicodedata from typing import Any, Dict, List, Optional, Tuple import sentencepiece as sp from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "spm.model"} class DebertaV2Tokenizer(PreTrainedTokenizer): r""" Constructs a DeBERTa-v2 tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece). Args: vocab_file (`str`): [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that contains the vocabulary necessary to instantiate a tokenizer. do_lower_case (`bool`, *optional*, defaults to `False`): Whether or not to lowercase the input when tokenizing. bos_token (`string`, *optional*, defaults to `"[CLS]"`): The beginning of sequence token that was used during pre-training. Can be used a sequence classifier token. When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the `cls_token`. eos_token (`string`, *optional*, defaults to `"[SEP]"`): The end of sequence token. When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. unk_token (`str`, *optional*, defaults to `"[UNK]"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. sep_token (`str`, *optional*, defaults to `"[SEP]"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. pad_token (`str`, *optional*, defaults to `"[PAD]"`): The token used for padding, for example when batching sequences of different lengths. cls_token (`str`, *optional*, defaults to `"[CLS]"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. mask_token (`str`, *optional*, defaults to `"[MASK]"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. sp_model_kwargs (`dict`, *optional*): Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things, to set: - `enable_sampling`: Enable subword regularization. - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout. - `nbest_size = {0,1}`: No sampling is performed. - `nbest_size > 1`: samples from the nbest_size results. 
- `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) using forward-filtering-and-backward-sampling algorithm. - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout. """ vocab_files_names = VOCAB_FILES_NAMES def __init__( self, vocab_file, do_lower_case=False, split_by_punct=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs, ) -> None: self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs if not os.path.isfile(vocab_file): raise ValueError( f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained" " model use `tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" ) self.do_lower_case = do_lower_case self.split_by_punct = split_by_punct self.vocab_file = vocab_file self._tokenizer = SPMTokenizer( vocab_file, None, split_by_punct=split_by_punct, sp_model_kwargs=self.sp_model_kwargs ) unk_token = AddedToken(unk_token, normalized=True, special=True) if isinstance(unk_token, str) else unk_token super().__init__( do_lower_case=do_lower_case, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, split_by_punct=split_by_punct, sp_model_kwargs=self.sp_model_kwargs, **kwargs, ) self._tokenizer.special_tokens = self.all_special_tokens @property def vocab_size(self): return len(self.vocab) @property def vocab(self): return self._tokenizer.vocab def get_vocab(self): vocab = self.vocab.copy() vocab.update(self.get_added_vocab()) return vocab def _tokenize(self, text: str) -> List[str]: """Take as input a string and return a list of strings (tokens) for words/sub-words""" if self.do_lower_case: text = text.lower() return self._tokenizer.tokenize(text) def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self._tokenizer.spm.PieceToId(token) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self._tokenizer.spm.IdToPiece(index) if index < self.vocab_size else self.unk_token def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" return self._tokenizer.decode(tokens) def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A DeBERTa sequence has the following format: - single sequence: [CLS] X [SEP] - pair of sequences: [CLS] A [SEP] B [SEP] Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + token_ids_1 + sep def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False): """ Retrieves sequence ids from a token list that has no special tokens added. 
This method is called when adding special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) if token_ids_1 is not None: return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] return [1] + ([0] * len(token_ids_0)) + [1] def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None): """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A DeBERTa sequence pair mask has the following format: ``` 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | ``` If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s). Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs): add_prefix_space = kwargs.pop("add_prefix_space", False) if is_split_into_words or add_prefix_space: text = " " + text return (text, kwargs) def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: return self._tokenizer.save_pretrained(save_directory, filename_prefix=filename_prefix) class SPMTokenizer: r""" Constructs a tokenizer based on [SentencePiece](https://github.com/google/sentencepiece). Args: vocab_file (`str`): [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that contains the vocabulary necessary to instantiate a tokenizer. sp_model_kwargs (`dict`, *optional*): Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things, to set: - `enable_sampling`: Enable subword regularization. - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout. - `nbest_size = {0,1}`: No sampling is performed. - `nbest_size > 1`: samples from the nbest_size results. - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) using forward-filtering-and-backward-sampling algorithm. - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout. 
""" def __init__( self, vocab_file, special_tokens, split_by_punct=False, sp_model_kwargs: Optional[Dict[str, Any]] = None ): self.split_by_punct = split_by_punct self.vocab_file = vocab_file self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs spm = sp.SentencePieceProcessor(**self.sp_model_kwargs) if not os.path.exists(vocab_file): raise FileNotFoundError(f"{vocab_file} does not exist!") spm.load(vocab_file) bpe_vocab_size = spm.GetPieceSize() # Token map # <unk> 0+1 # <s> 1+1 # </s> 2+1 self.vocab = {spm.IdToPiece(i): i for i in range(bpe_vocab_size)} self.ids_to_tokens = [spm.IdToPiece(i) for i in range(bpe_vocab_size)] # self.vocab['[PAD]'] = 0 # self.vocab['[CLS]'] = 1 # self.vocab['[SEP]'] = 2 # self.vocab['[UNK]'] = 3 self.spm = spm self.special_tokens = special_tokens def __getstate__(self): state = self.__dict__.copy() state["spm"] = None return state def __setstate__(self, d): self.__dict__ = d # for backward compatibility if not hasattr(self, "sp_model_kwargs"): self.sp_model_kwargs = {} self.spm = sp.SentencePieceProcessor(**self.sp_model_kwargs) self.spm.Load(self.vocab_file) def tokenize(self, text): return self._encode_as_pieces(text) def convert_ids_to_tokens(self, ids): tokens = [] for i in ids: tokens.append(self.ids_to_tokens[i]) return tokens def decode(self, tokens, start=-1, end=-1, raw_text=None): if raw_text is None: current_sub_tokens = [] out_string = "" prev_is_special = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.special_tokens: if not prev_is_special: out_string += " " out_string += self.spm.decode_pieces(current_sub_tokens) + token prev_is_special = True current_sub_tokens = [] else: current_sub_tokens.append(token) prev_is_special = False out_string += self.spm.decode_pieces(current_sub_tokens) return out_string.strip() else: words = self.split_to_words(raw_text) word_tokens = [self.tokenize(w) for w in words] token2words = [0] * len(tokens) tid = 0 for i, w in enumerate(word_tokens): for k, t in enumerate(w): token2words[tid] = i tid += 1 word_start = token2words[start] word_end = token2words[end] if end < len(tokens) else len(words) text = "".join(words[word_start:word_end]) return text # TODO add a deprecation cycle as this can have different behaviour from our API def add_special_token(self, token): if token not in self.special_tokens: self.special_tokens.append(token) if token not in self.vocab: self.vocab[token] = len(self.vocab) - 1 self.ids_to_tokens.append(token) return self.id(token) def part_of_whole_word(self, token, is_bos=False): logger.warning_once( "The `DebertaTokenizer.part_of_whole_word` method is deprecated and will be removed in `transformers==4.35`" ) if is_bos: return True if ( len(token) == 1 and (_is_whitespace(list(token)[0]) or _is_control(list(token)[0]) or _is_punctuation(list(token)[0])) ) or token in self.special_tokens: return False word_start = b"\xe2\x96\x81".decode("utf-8") return not token.startswith(word_start) def pad(self): return "[PAD]" def bos(self): return "[CLS]" def eos(self): return "[SEP]" def unk(self): return "[UNK]" def mask(self): return "[MASK]" def sym(self, id): return self.ids_to_tokens[id] def id(self, sym): logger.warning_once( "The `DebertaTokenizer.id` method is deprecated and will be removed in `transformers==4.35`" ) return self.vocab[sym] if sym in self.vocab else 1 def _encode_as_pieces(self, text): text = convert_to_unicode(text) if self.split_by_punct: words = self._run_split_on_punc(text) pieces = 
[self.spm.encode(w, out_type=str) for w in words] return [p for w in pieces for p in w] else: return self.spm.encode(text, out_type=str) def split_to_words(self, text): pieces = self._encode_as_pieces(text) word_start = b"\xe2\x96\x81".decode("utf-8") words = [] offset = 0 prev_end = 0 for i, p in enumerate(pieces): if p.startswith(word_start): if offset > prev_end: words.append(text[prev_end:offset]) prev_end = offset w = p.replace(word_start, "") else: w = p try: s = text.index(w, offset) pn = "" k = i + 1 while k < len(pieces): pn = pieces[k].replace(word_start, "") if len(pn) > 0: break k += 1 if len(pn) > 0 and pn in text[offset:s]: offset = offset + 1 else: offset = s + len(w) except Exception: offset = offset + 1 if prev_end < offset: words.append(text[prev_end:offset]) return words def _run_split_on_punc(self, text): """Splits punctuation on a piece of text.""" chars = list(text) i = 0 start_new_word = True output = [] while i < len(chars): char = chars[i] if _is_punctuation(char): output.append([char]) start_new_word = True else: if start_new_word: output.append([]) start_new_word = False output[-1].append(char) i += 1 return ["".join(x) for x in output] def save_pretrained(self, path: str, filename_prefix: str = None): filename = VOCAB_FILES_NAMES[list(VOCAB_FILES_NAMES.keys())[0]] if filename_prefix is not None: filename = filename_prefix + "-" + filename full_path = os.path.join(path, filename) with open(full_path, "wb") as fs: fs.write(self.spm.serialized_model_proto()) return (full_path,) def _is_whitespace(char): """Checks whether `chars` is a whitespace character.""" # \t, \n, and \r are technically control characters but we treat them # as whitespace since they are generally considered as such. if char == " " or char == "\t" or char == "\n" or char == "\r": return True cat = unicodedata.category(char) if cat == "Zs": return True return False def _is_control(char): """Checks whether `chars` is a control character.""" # These are technically control characters but we count them as whitespace # characters. if char == "\t" or char == "\n" or char == "\r": return False cat = unicodedata.category(char) if cat.startswith("C"): return True return False def _is_punctuation(char): """Checks whether `chars` is a punctuation character.""" cp = ord(char) # We treat all non-letter/number ASCII as punctuation. # Characters such as "^", "$", and "`" are not in the Unicode # Punctuation class but we treat them as punctuation anyways, for # consistency. if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126): return True cat = unicodedata.category(char) if cat.startswith("P"): return True return False def convert_to_unicode(text): """Converts `text` to Unicode (if it's not already), assuming utf-8 input.""" if isinstance(text, str): return text elif isinstance(text, bytes): return text.decode("utf-8", "ignore") else: raise TypeError(f"Unsupported string type: {type(text)}") __all__ = ["DebertaV2Tokenizer"]
transformers/src/transformers/models/deberta_v2/tokenization_deberta_v2.py/0
{ "file_path": "transformers/src/transformers/models/deberta_v2/tokenization_deberta_v2.py", "repo_id": "transformers", "token_count": 9334 }
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert DeiT distilled checkpoints from the timm library.""" import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) def create_rename_keys(config, base_model=False): rename_keys = [] for i in range(config.num_hidden_layers): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"blocks.{i}.norm1.weight", f"deit.encoder.layer.{i}.layernorm_before.weight")) rename_keys.append((f"blocks.{i}.norm1.bias", f"deit.encoder.layer.{i}.layernorm_before.bias")) rename_keys.append((f"blocks.{i}.attn.proj.weight", f"deit.encoder.layer.{i}.attention.output.dense.weight")) rename_keys.append((f"blocks.{i}.attn.proj.bias", f"deit.encoder.layer.{i}.attention.output.dense.bias")) rename_keys.append((f"blocks.{i}.norm2.weight", f"deit.encoder.layer.{i}.layernorm_after.weight")) rename_keys.append((f"blocks.{i}.norm2.bias", f"deit.encoder.layer.{i}.layernorm_after.bias")) rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"deit.encoder.layer.{i}.intermediate.dense.weight")) rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"deit.encoder.layer.{i}.intermediate.dense.bias")) rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"deit.encoder.layer.{i}.output.dense.weight")) rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"deit.encoder.layer.{i}.output.dense.bias")) # projection layer + position embeddings rename_keys.extend( [ ("cls_token", "deit.embeddings.cls_token"), ("dist_token", "deit.embeddings.distillation_token"), ("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"), ("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"), ("pos_embed", "deit.embeddings.position_embeddings"), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("norm.weight", "layernorm.weight"), ("norm.bias", "layernorm.bias"), ("pre_logits.fc.weight", "pooler.dense.weight"), ("pre_logits.fc.bias", "pooler.dense.bias"), ] ) # if just the base model, we should remove "deit" from all keys that start with "deit" rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys] else: # layernorm + classification heads rename_keys.extend( [ ("norm.weight", "deit.layernorm.weight"), ("norm.bias", "deit.layernorm.bias"), ("head.weight", "cls_classifier.weight"), ("head.bias", "cls_classifier.bias"), ("head_dist.weight", "distillation_classifier.weight"), ("head_dist.bias", "distillation_classifier.bias"), ] ) return rename_keys # we split up the matrix of each encoder layer into queries, keys and values def 
read_in_q_k_v(state_dict, config, base_model=False): for i in range(config.num_hidden_layers): if base_model: prefix = "" else: prefix = "deit." # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight") in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias") # next, add query, keys and values (in that order) to the state dict state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[ : config.hidden_size, : ] state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size] state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[ -config.hidden_size :, : ] state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :] def rename_key(dct, old, new): val = dct.pop(old) dct[new] = val # We will verify our results on an image of cute cats def prepare_img(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" im = Image.open(requests.get(url, stream=True).raw) return im @torch.no_grad() def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path): """ Copy/paste/tweak model's weights to our DeiT structure. """ # define default DeiT configuration config = DeiTConfig() # all deit models have fine-tuned heads base_model = False # dataset (fine-tuned on ImageNet 2012), patch_size and image_size config.num_labels = 1000 repo_id = "huggingface/label-files" filename = "imagenet-1k-id2label.json" id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} config.patch_size = int(deit_name[-6:-4]) config.image_size = int(deit_name[-3:]) # size of the architecture if deit_name[9:].startswith("tiny"): config.hidden_size = 192 config.intermediate_size = 768 config.num_hidden_layers = 12 config.num_attention_heads = 3 elif deit_name[9:].startswith("small"): config.hidden_size = 384 config.intermediate_size = 1536 config.num_hidden_layers = 12 config.num_attention_heads = 6 if deit_name[9:].startswith("base"): pass elif deit_name[4:].startswith("large"): config.hidden_size = 1024 config.intermediate_size = 4096 config.num_hidden_layers = 24 config.num_attention_heads = 16 # load original model from timm timm_model = timm.create_model(deit_name, pretrained=True) timm_model.eval() # load state_dict of original model, remove and rename some keys state_dict = timm_model.state_dict() rename_keys = create_rename_keys(config, base_model) for src, dest in rename_keys: rename_key(state_dict, src, dest) read_in_q_k_v(state_dict, config, base_model) # load HuggingFace model model = DeiTForImageClassificationWithTeacher(config).eval() model.load_state_dict(state_dict) # Check outputs on an image, prepared by DeiTImageProcessor size = int( (256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 
224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103 image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size) encoding = image_processor(images=prepare_img(), return_tensors="pt") pixel_values = encoding["pixel_values"] outputs = model(pixel_values) timm_logits = timm_model(pixel_values) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(timm_logits, outputs.logits, atol=1e-3) Path(pytorch_dump_folder_path).mkdir(exist_ok=True) print(f"Saving model {deit_name} to {pytorch_dump_folder_path}") model.save_pretrained(pytorch_dump_folder_path) print(f"Saving image processor to {pytorch_dump_folder_path}") image_processor.save_pretrained(pytorch_dump_folder_path) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--deit_name", default="vit_deit_base_distilled_patch16_224", type=str, help="Name of the DeiT timm model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) args = parser.parse_args() convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
transformers/src/transformers/models/deit/convert_deit_timm_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/deit/convert_deit_timm_to_pytorch.py", "repo_id": "transformers", "token_count": 3875 }