# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import tempfile import unittest from transformers import SPIECE_UNDERLINE, AddedToken, BatchEncoding, SiglipTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model") if is_torch_available(): FRAMEWORK = "pt" elif is_tf_available(): FRAMEWORK = "tf" else: FRAMEWORK = "jax" @require_sentencepiece @require_tokenizers class SiglipTokenizationTest(TokenizerTesterMixin, unittest.TestCase): from_pretrained_id = "google/siglip-base-patch16-224" tokenizer_class = SiglipTokenizer test_rust_tokenizer = False test_sentencepiece = True test_sentencepiece_ignore_case = True # Copied from tests.models.t5.test_tokenization_t5.T5TokenizationTest.setUp with T5->Siglip def setUp(self): super().setUp() # We have a SentencePiece fixture for testing tokenizer = SiglipTokenizer(SAMPLE_VOCAB) tokenizer.save_pretrained(self.tmpdirname) # Copied from tests.models.t5.test_tokenization_t5.T5TokenizationTest.test_convert_token_and_id with T5->Siglip def test_convert_token_and_id(self): """Test ``_convert_token_to_id`` and ``_convert_id_to_token``.""" token = "<s>" token_id = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id) self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token) def test_get_vocab(self): vocab_keys = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0], "<unk>") self.assertEqual(vocab_keys[1], "<s>") def test_full_tokenizer(self): tokenizer = SiglipTokenizer(SAMPLE_VOCAB) tokens = tokenizer.tokenize("This is a test") self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁t", "est"]) self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [66, 46, 10, 170, 382]) tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( tokens, [ SPIECE_UNDERLINE, "i", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ], ) ids = tokenizer.convert_tokens_to_ids(tokens) self.assertListEqual(ids, [7, 23, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 12, 66, 46, 72, 80, 6, 0]) back_tokens = tokenizer.convert_ids_to_tokens(ids) self.assertListEqual( back_tokens, [ SPIECE_UNDERLINE, "i", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ], ) @cached_property def siglip_tokenizer(self): return 
SiglipTokenizer.from_pretrained("google/siglip-base-patch16-224") # Copied from tests.models.t5.test_tokenization_t5.T5TokenizationTest.get_tokenizer with T5->Siglip def get_tokenizer(self, **kwargs) -> SiglipTokenizer: return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs) # Copied from tests.models.t5.test_tokenization_t5.T5TokenizationTest.test_rust_and_python_full_tokenizers with T5->Siglip def test_rust_and_python_full_tokenizers(self): if not self.test_rust_tokenizer: self.skipTest(reason="test_rust_tokenizer is set to False") tokenizer = self.get_tokenizer() rust_tokenizer = self.get_rust_tokenizer() sequence = "I was born in 92000, and this is falsé." tokens = tokenizer.tokenize(sequence) rust_tokens = rust_tokenizer.tokenize(sequence) self.assertListEqual(tokens, rust_tokens) ids = tokenizer.encode(sequence, add_special_tokens=False) rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False) self.assertListEqual(ids, rust_ids) rust_tokenizer = self.get_rust_tokenizer() ids = tokenizer.encode(sequence) rust_ids = rust_tokenizer.encode(sequence) self.assertListEqual(ids, rust_ids) def test_eos_treatment(self): tokenizer = self.siglip_tokenizer batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"]) batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""]) self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"]) def test_prepare_batch(self): tokenizer = self.siglip_tokenizer src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."] expected_src_tokens = [262, 266, 476, 8532, 270, 4460, 3949, 1682, tokenizer.eos_token_id] batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK) self.assertIsInstance(batch, BatchEncoding) if FRAMEWORK != "jax": result = list(batch.input_ids.numpy()[0]) else: result = list(batch.input_ids.tolist()[0]) self.assertListEqual(expected_src_tokens, result) self.assertEqual((2, 9), batch.input_ids.shape) def test_empty_target_text(self): tokenizer = self.siglip_tokenizer src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."] batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK) # check if input_ids are returned and no decoder_input_ids self.assertIn("input_ids", batch) self.assertNotIn("decoder_input_ids", batch) self.assertNotIn("decoder_attention_mask", batch) def test_max_length(self): tokenizer = self.siglip_tokenizer tgt_text = ["Summary of the text.", "Another summary."] targets = tokenizer( text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK ) self.assertEqual(32, targets["input_ids"].shape[1]) def test_eos_in_input(self): tokenizer = self.siglip_tokenizer src_text = ["A long paragraph for summarization. </s>"] tgt_text = ["Summary of the text. 
</s>"] expected_src_tokens = [262, 266, 476, 8532, 270, 4460, 3949, 1682, 1] expected_tgt_tokens = [6254, 267, 260, 1443, 1] batch = tokenizer(src_text, text_target=tgt_text) self.assertEqual(expected_src_tokens, batch["input_ids"][0]) self.assertEqual(expected_tgt_tokens, batch["labels"][0]) @unittest.skip(reason="SiglipTokenizer strips the punctuation") def test_subword_regularization_tokenizer(self): pass @unittest.skip(reason="SiglipTokenizer strips the punctuation") def test_pickle_subword_regularization_tokenizer(self): pass # Copied from tests.models.t5.test_tokenization_t5.T5TokenizationTest.test_special_tokens_initialization with T5->Siglip def test_special_tokens_initialization(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): added_tokens = [f"<extra_id_{i}>" for i in range(100)] + [AddedToken("<special>", lstrip=True)] tokenizer_r = self.rust_tokenizer_class.from_pretrained( pretrained_name, additional_special_tokens=added_tokens, **kwargs ) tokenizer_cr = self.rust_tokenizer_class.from_pretrained( pretrained_name, additional_special_tokens=added_tokens, **kwargs, from_slow=True ) tokenizer_p = self.tokenizer_class.from_pretrained( pretrained_name, additional_special_tokens=added_tokens, **kwargs ) p_output = tokenizer_p.encode("Hey this is a <special> token") r_output = tokenizer_r.encode("Hey this is a <special> token") cr_output = tokenizer_cr.encode("Hey this is a <special> token") special_token_id = tokenizer_r.encode("<special>", add_special_tokens=False)[0] self.assertEqual(p_output, r_output) self.assertEqual(cr_output, r_output) self.assertTrue(special_token_id in p_output) self.assertTrue(special_token_id in r_output) self.assertTrue(special_token_id in cr_output) # Copied from tests.models.t5.test_tokenization_t5.T5TokenizationTest.test_special_tokens_initialization_with_non_empty_additional_special_tokens with T5->Siglip def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self): tokenizer_list = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer())) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer())) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(tmp_dir) with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file: special_tokens_map = json.load(json_file) with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file: tokenizer_config = json.load(json_file) added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(100)] special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [ "an_additional_special_token" ] tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [ "an_additional_special_token" ] with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile: json.dump(special_tokens_map, outfile) with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile: json.dump(tokenizer_config, outfile) # the following checks allow us to verify that our test works as expected, i.e. 
that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files tokenizer_without_change_in_init = tokenizer_class.from_pretrained( tmp_dir, ) self.assertIn( "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # BySiglipTokenization no vocab self.assertEqual( ["an_additional_special_token"], tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"]) ), ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)] tokenizer = tokenizer_class.from_pretrained( tmp_dir, additional_special_tokens=new_added_tokens, ) self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens) self.assertEqual( ["a_new_additional_special_token"], tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"]) ), ) def test_sentencepiece_tokenize_and_convert_tokens_to_string(self): """Test ``_tokenize`` and ``convert_tokens_to_string``.""" if not self.test_sentencepiece: self.skipTest(reason="test_sentencepiece is set to False") tokenizer = self.get_tokenizer() text = "This is text to test the tokenizer." if self.test_sentencepiece_ignore_case: text = text.lower() tokens = tokenizer.tokenize(text) self.assertTrue(len(tokens) > 0) # check if converting back to original text works reverse_text = tokenizer.convert_tokens_to_string(tokens) if self.test_sentencepiece_ignore_case: reverse_text = reverse_text.lower() expected_text = "this is text to test the tokenizer" self.assertEqual(reverse_text, expected_text) special_tokens = tokenizer.all_special_tokens special_tokens_string = tokenizer.convert_tokens_to_string(special_tokens) for special_token in special_tokens: self.assertIn(special_token, special_tokens_string) if self.test_rust_tokenizer: rust_tokenizer = self.get_rust_tokenizer() special_tokens_string_rust = rust_tokenizer.convert_tokens_to_string(special_tokens) self.assertEqual(special_tokens_string, special_tokens_string_rust) @slow def test_tokenizer_integration(self): tokenizer = SiglipTokenizer.from_pretrained("google/siglip-base-patch16-224") # fmt: off texts = [ 'the real mountain view', 'Zürich', 'San Francisco', 'a picture of a laptop with the lockscreen on, a cup of cappucino, salt and pepper grinders. 
The view through the window reveals lake Zürich and the Alps in the background of the city.', ] expected_input_ids = [ [260, 638, 3293, 870, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [262, 761, 5879, 5345, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [262, 264, 452, 20563, 15949, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [262, 266, 1357, 267, 262, 266, 4429, 275, 260, 3940, 6360, 277, 262, 266, 3064, 267, 3549, 388, 16538, 296, 298, 2617, 263, 4869, 14998, 264, 260, 870, 393, 260, 1710, 7958, 4324, 262, 761, 5879, 5345, 263, 260, 1518, 388, 264, 268, 260, 1970, 267, 260, 741, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], ] # fmt: on for text, expected in zip(texts, expected_input_ids): input_ids = tokenizer(text, padding="max_length").input_ids self.assertListEqual(input_ids, expected) def test_some_edge_cases(self): tokenizer = SiglipTokenizer.from_pretrained("google/siglip-base-patch16-224", legacy=False) sp_tokens = tokenizer.sp_model.encode("</s>>", out_type=str) self.assertEqual(sp_tokens, ["</", "s", ">", ">"]) tokens = tokenizer.tokenize("</s>>") self.assertNotEqual(sp_tokens, tokens) self.assertEqual(tokens, ["</s>"]) tokens = tokenizer.tokenize("") self.assertEqual(tokens, []) self.assertEqual(tokens, tokenizer.sp_model.encode("", out_type=str)) tokens = tokenizer.tokenize(" ") self.assertEqual(tokens, []) self.assertEqual(tokens, tokenizer.sp_model.encode(" ", out_type=str)) tokens = tokenizer.tokenize("▁") self.assertEqual(tokens, []) self.assertEqual(tokens, tokenizer.sp_model.encode("▁", out_type=str)) tokens = tokenizer.tokenize(" ▁") self.assertEqual(tokens, []) self.assertEqual(tokens, tokenizer.sp_model.encode("▁", out_type=str)) @require_sentencepiece @require_tokenizers class CommonSpmIntegrationTests(unittest.TestCase): """ A class that regroups important test to make sure that we properly handle the special tokens. """ @classmethod def setUpClass(cls): tokenizer = SiglipTokenizer(SAMPLE_VOCAB, extra_ids=0, legacy=False) tokenizer.add_special_tokens( {"additional_special_tokens": [AddedToken("<extra_id_0>", rstrip=False, lstrip=False)]} ) cls.tokenizer = tokenizer def test_add_dummy_prefix(self): # make sure `'▁'` is prepended, and outputs match sp_model's # `sentencepiece.NormalizerSpec.add_dummy_prefix` attribute input_ids = self.tokenizer.encode(". Hello", add_special_tokens=False) self.assertEqual(input_ids, [37, 86, 20]) self.assertEqual(input_ids, [37, 86, 20]) tokens = self.tokenizer.tokenize(". Hello") self.assertEqual(tokens, ["▁he", "ll", "o"]) tokens = self.tokenizer.tokenize("") self.assertEqual(tokens, []) self.assertEqual(tokens, self.tokenizer.sp_model.encode("", out_type=str)) tokens = self.tokenizer.tokenize(" ") self.assertEqual(tokens, []) self.assertEqual(tokens, self.tokenizer.sp_model.encode(" ", out_type=str)) tokens = self.tokenizer.tokenize("▁") self.assertEqual(tokens, []) self.assertEqual(tokens, self.tokenizer.sp_model.encode("▁", out_type=str)) def test_remove_extra_whitespaces(self): # make sure the extra spaces are eaten # sentencepiece.NormalizerSpec.remove_extra_whitespaces attribute input_ids = self.tokenizer.encode(" . 
Hello", add_special_tokens=False) self.assertEqual(input_ids, [37, 86, 20]) self.assertEqual(input_ids, [37, 86, 20]) tokens = self.tokenizer.tokenize(" . Hello") self.assertEqual(tokens, ["▁he", "ll", "o"]) # `'▁'` is also a whitespace input_ids = self.tokenizer.encode("▁He is not") self.assertEqual(input_ids, [37, 46, 44, 2]) tokens = self.tokenizer.tokenize("▁He is not") self.assertEqual(tokens, ["▁he", "▁is", "▁not"]) # no extra space added input_ids = self.tokenizer.encode("▁He is not ▁He") self.assertEqual(input_ids, [37, 46, 44, 37, 2]) tokens = self.tokenizer.tokenize("▁He is not ▁He") self.assertEqual(tokens, ["▁he", "▁is", "▁not", "▁he"]) # spaces are eaten by spm even if not start
File: transformers/tests/models/siglip/test_tokenization_siglip.py
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Splinter model.""" import copy import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import SplinterConfig, SplinterForPreTraining, SplinterForQuestionAnswering, SplinterModel class SplinterModelTester: def __init__( self, parent, batch_size=13, num_questions=3, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, question_token_id=1, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.num_questions = num_questions self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.question_token_id = question_token_id self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_ids[:, 1] = self.question_token_id input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) start_positions = None end_positions = None question_positions = None if self.use_labels: start_positions = ids_tensor([self.batch_size, self.num_questions], self.type_sequence_label_size) end_positions = ids_tensor([self.batch_size, self.num_questions], self.type_sequence_label_size) question_positions = ids_tensor([self.batch_size, self.num_questions], self.num_labels) config = SplinterConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, 
num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, question_token_id=self.question_token_id, ) return (config, input_ids, token_type_ids, input_mask, start_positions, end_positions, question_positions) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, start_positions, end_positions, question_positions, ): model = SplinterModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, start_positions, end_positions, question_positions, ): model = SplinterForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=start_positions[:, 0], end_positions=end_positions[:, 0], ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_for_pretraining( self, config, input_ids, token_type_ids, input_mask, start_positions, end_positions, question_positions, ): model = SplinterForPreTraining(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=start_positions, end_positions=end_positions, question_positions=question_positions, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.num_questions, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.num_questions, self.seq_length)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, start_positions, end_positions, question_positions, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_torch class SplinterModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( SplinterModel, SplinterForQuestionAnswering, SplinterForPreTraining, ) if is_torch_available() else () ) pipeline_model_mapping = ( {"feature-extraction": SplinterModel, "question-answering": SplinterForQuestionAnswering} if is_torch_available() else {} ) # TODO: Fix the failed tests when this model gets more usage def is_pipeline_test_to_skip( self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, image_processor_name, feature_extractor_name, processor_name, ): if pipeline_test_case_name == "QAPipelineTests": return True elif pipeline_test_case_name == "FeatureExtractionPipelineTests" and tokenizer_name.endswith("Fast"): return True return False def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = copy.deepcopy(inputs_dict) if return_labels: if issubclass(model_class, 
SplinterForPreTraining): inputs_dict["start_positions"] = torch.zeros( self.model_tester.batch_size, self.model_tester.num_questions, dtype=torch.long, device=torch_device, ) inputs_dict["end_positions"] = torch.zeros( self.model_tester.batch_size, self.model_tester.num_questions, dtype=torch.long, device=torch_device, ) inputs_dict["question_positions"] = torch.zeros( self.model_tester.batch_size, self.model_tester.num_questions, dtype=torch.long, device=torch_device, ) elif issubclass(model_class, SplinterForQuestionAnswering): inputs_dict["start_positions"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) inputs_dict["end_positions"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) return inputs_dict def setUp(self): self.model_tester = SplinterModelTester(self) self.config_tester = ConfigTester(self, config_class=SplinterConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_for_pretraining(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*config_and_inputs) def test_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) if not self.is_encoder_decoder: input_ids = inputs["input_ids"] del inputs["input_ids"] else: encoder_input_ids = inputs["input_ids"] decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids) del inputs["input_ids"] inputs.pop("decoder_input_ids", None) wte = model.get_input_embeddings() if not self.is_encoder_decoder: inputs["inputs_embeds"] = wte(input_ids) else: inputs["inputs_embeds"] = wte(encoder_input_ids) inputs["decoder_inputs_embeds"] = wte(decoder_input_ids) with torch.no_grad(): if isinstance(model, SplinterForPreTraining): with self.assertRaises(TypeError): # question_positions must not be None. model(**inputs)[0] else: model(**inputs)[0] @slow def test_model_from_pretrained(self): model_name = "tau/splinter-base" model = SplinterModel.from_pretrained(model_name) self.assertIsNotNone(model) # overwrite from common since `SplinterForPreTraining` could contain different number of question tokens in inputs. # When the batch is distributed to multiple devices, each replica could get different values for the maximal number # of question tokens (see `SplinterForPreTraining._prepare_question_positions()`), and the model returns different # shape along dimension 1 (i.e. `num_questions`) that could not be combined into a single tensor as an output. 
@require_torch_multi_gpu def test_multi_gpu_data_parallel_forward(self): from torch import nn config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # some params shouldn't be scattered by nn.DataParallel # so just remove them if they are present. blacklist_non_batched_params = ["head_mask", "decoder_head_mask", "cross_attn_head_mask"] for k in blacklist_non_batched_params: inputs_dict.pop(k, None) # move input tensors to cuda:O for k, v in inputs_dict.items(): if torch.is_tensor(v): inputs_dict[k] = v.to(0) for model_class in self.all_model_classes: # Skip this case since it will fail sometimes, as described above. if model_class == SplinterForPreTraining: continue model = model_class(config=config) model.to(0) model.eval() # Wrap model in nn.DataParallel model = nn.DataParallel(model) with torch.no_grad(): _ = model(**self._prepare_for_class(inputs_dict, model_class)) @require_torch class SplinterModelIntegrationTest(unittest.TestCase): @slow def test_splinter_question_answering(self): model = SplinterForQuestionAnswering.from_pretrained("tau/splinter-base-qass") # Input: "[CLS] Brad was born in [QUESTION] . He returned to the United Kingdom later . [SEP]" # Output should be the span "the United Kingdom" input_ids = torch.tensor( [[101, 7796, 1108, 1255, 1107, 104, 119, 1124, 1608, 1106, 1103, 1244, 2325, 1224, 119, 102]] ) output = model(input_ids) expected_shape = torch.Size((1, 16)) self.assertEqual(output.start_logits.shape, expected_shape) self.assertEqual(output.end_logits.shape, expected_shape) self.assertEqual(torch.argmax(output.start_logits), 10) self.assertEqual(torch.argmax(output.end_logits), 12) @slow def test_splinter_pretraining(self): model = SplinterForPreTraining.from_pretrained("tau/splinter-base-qass") # Input: "[CLS] [QUESTION] was born in [QUESTION] . Brad returned to the United Kingdom later . [SEP]" # Output should be the spans "Brad" and "the United Kingdom" input_ids = torch.tensor( [[101, 104, 1108, 1255, 1107, 104, 119, 7796, 1608, 1106, 1103, 1244, 2325, 1224, 119, 102]] ) question_positions = torch.tensor([[1, 5]], dtype=torch.long) output = model(input_ids, question_positions=question_positions) expected_shape = torch.Size((1, 2, 16)) self.assertEqual(output.start_logits.shape, expected_shape) self.assertEqual(output.end_logits.shape, expected_shape) self.assertEqual(torch.argmax(output.start_logits[0, 0]), 7) self.assertEqual(torch.argmax(output.end_logits[0, 0]), 7) self.assertEqual(torch.argmax(output.start_logits[0, 1]), 10) self.assertEqual(torch.argmax(output.end_logits[0, 1]), 12) @slow def test_splinter_pretraining_loss_requires_question_positions(self): model = SplinterForPreTraining.from_pretrained("tau/splinter-base-qass") # Input: "[CLS] [QUESTION] was born in [QUESTION] . Brad returned to the United Kingdom later . [SEP]" # Output should be the spans "Brad" and "the United Kingdom" input_ids = torch.tensor( [[101, 104, 1108, 1255, 1107, 104, 119, 7796, 1608, 1106, 1103, 1244, 2325, 1224, 119, 102]] ) start_positions = torch.tensor([[7, 10]], dtype=torch.long) end_positions = torch.tensor([7, 12], dtype=torch.long) with self.assertRaises(TypeError): model( input_ids, start_positions=start_positions, end_positions=end_positions, ) @slow def test_splinter_pretraining_loss(self): model = SplinterForPreTraining.from_pretrained("tau/splinter-base-qass") # Input: "[CLS] [QUESTION] was born in [QUESTION] . Brad returned to the United Kingdom later . 
[SEP]" # Output should be the spans "Brad" and "the United Kingdom" input_ids = torch.tensor( [ [101, 104, 1108, 1255, 1107, 104, 119, 7796, 1608, 1106, 1103, 1244, 2325, 1224, 119, 102], [101, 104, 1108, 1255, 1107, 104, 119, 7796, 1608, 1106, 1103, 1244, 2325, 1224, 119, 102], ] ) start_positions = torch.tensor([[7, 10], [7, 10]], dtype=torch.long) end_positions = torch.tensor([[7, 12], [7, 12]], dtype=torch.long) question_positions = torch.tensor([[1, 5], [1, 5]], dtype=torch.long) output = model( input_ids, start_positions=start_positions, end_positions=end_positions, question_positions=question_positions, ) self.assertAlmostEqual(output.loss.item(), 0.0024, 4) @slow def test_splinter_pretraining_loss_with_padding(self): model = SplinterForPreTraining.from_pretrained("tau/splinter-base-qass") # Input: "[CLS] [QUESTION] was born in [QUESTION] . Brad returned to the United Kingdom later . [SEP]" # Output should be the spans "Brad" and "the United Kingdom" input_ids = torch.tensor( [ [101, 104, 1108, 1255, 1107, 104, 119, 7796, 1608, 1106, 1103, 1244, 2325, 1224, 119, 102], ] ) start_positions = torch.tensor([[7, 10]], dtype=torch.long) end_positions = torch.tensor([7, 12], dtype=torch.long) question_positions = torch.tensor([[1, 5]], dtype=torch.long) start_positions_with_padding = torch.tensor([[7, 10, 0]], dtype=torch.long) end_positions_with_padding = torch.tensor([7, 12, 0], dtype=torch.long) question_positions_with_padding = torch.tensor([[1, 5, 0]], dtype=torch.long) output = model( input_ids, start_positions=start_positions, end_positions=end_positions, question_positions=question_positions, ) output_with_padding = model( input_ids, start_positions=start_positions_with_padding, end_positions=end_positions_with_padding, question_positions=question_positions_with_padding, ) self.assertAlmostEqual(output.loss.item(), output_with_padding.loss.item(), 4) # Note that the original code uses 0 to denote padded question tokens # and their start and end positions. As the pad_token_id of the model's # config is used for the losse's ignore_index in SplinterForPreTraining, # we add this test to ensure anybody making changes to the default # value of the config, will be aware of the implication. self.assertEqual(model.config.pad_token_id, 0) @slow def test_splinter_pretraining_prepare_question_positions(self): model = SplinterForPreTraining.from_pretrained("tau/splinter-base-qass") input_ids = torch.tensor( [ [101, 104, 1, 2, 104, 3, 4, 102], [101, 1, 104, 2, 104, 3, 104, 102], [101, 1, 2, 104, 104, 3, 4, 102], [101, 1, 2, 3, 4, 5, 104, 102], ] ) question_positions = torch.tensor([[1, 4, 0], [2, 4, 6], [3, 4, 0], [6, 0, 0]], dtype=torch.long) output_without_positions = model(input_ids) output_with_positions = model(input_ids, question_positions=question_positions) self.assertTrue((output_without_positions.start_logits == output_with_positions.start_logits).all()) self.assertTrue((output_without_positions.end_logits == output_with_positions.end_logits).all())
File: transformers/tests/models/splinter/test_modeling_splinter.py
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch SwiftFormer model.""" import copy import unittest from transformers import PretrainedConfig, SwiftFormerConfig from transformers.testing_utils import ( require_torch, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwiftFormerForImageClassification, SwiftFormerModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class SwiftFormerModelTester: def __init__( self, parent, batch_size=13, num_channels=3, is_training=True, use_labels=True, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, image_size=224, num_labels=3, layer_depths=[1, 1, 1, 1], embed_dims=[16, 16, 32, 32], ): self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.is_training = is_training self.use_labels = use_labels self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.num_labels = num_labels self.image_size = image_size self.layer_depths = layer_depths self.embed_dims = embed_dims def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.num_labels) config = self.get_config() return config, pixel_values, labels def get_config(self): return SwiftFormerConfig( depths=self.layer_depths, embed_dims=self.embed_dims, mlp_ratio=4, downsamples=[True, True, True, True], hidden_act="gelu", num_labels=self.num_labels, down_patch_size=3, down_stride=2, down_pad=1, drop_rate=0.0, drop_path_rate=0.0, use_layer_scale=True, layer_scale_init_value=1e-5, ) def create_and_check_model(self, config, pixel_values, labels): model = SwiftFormerModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7)) def create_and_check_for_image_classification(self, config, pixel_values, labels): config.num_labels = self.num_labels model = SwiftFormerForImageClassification(config) model.to(torch_device) model.eval() result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) model = SwiftFormerForImageClassification(config) model.to(torch_device) model.eval() pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) result = model(pixel_values) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def 
prepare_config_and_inputs_for_common(self): (config, pixel_values, labels) = self.prepare_config_and_inputs() inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as SwiftFormer does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else () pipeline_model_mapping = ( {"image-feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification} if is_torch_available() else {} ) fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False has_attentions = False def setUp(self): self.model_tester = SwiftFormerModelTester(self) self.config_tester = ConfigTester( self, config_class=SwiftFormerConfig, has_text_modality=False, hidden_size=37, num_attention_heads=12, num_hidden_layers=12, ) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="SwiftFormer does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_get_set_embeddings(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): model_name = "MBZUAI/swiftformer-xs" model = SwiftFormerModel.from_pretrained(model_name) self.assertIsNotNone(model) @unittest.skip(reason="SwiftFormer does not output attentions") def test_attention_outputs(self): pass def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_stages = 8 self.assertEqual(len(hidden_states), expected_num_stages) # TODO # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width) # with the width and height being successively divided by 2, after every 2 blocks for i in range(len(hidden_states)): self.assertEqual( hidden_states[i].shape, torch.Size( [ self.model_tester.batch_size, self.model_tester.embed_dims[i // 2], (self.model_tester.image_size // 4) // 2 ** (i // 2), (self.model_tester.image_size // 4) // 2 ** (i // 2), ] ), ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_initialization(self): def _config_zero_init(config): configs_no_init = copy.deepcopy(config) for key in configs_no_init.__dict__.keys(): if "_range" in key or "_std" in key or "initializer_factor" in 
key or "layer_scale" in key: setattr(configs_no_init, key, 1e-10) if isinstance(getattr(configs_no_init, key, None), PretrainedConfig): no_init_subconfig = _config_zero_init(getattr(configs_no_init, key)) setattr(configs_no_init, key, no_init_subconfig) return configs_no_init config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9) / 1e9).round().item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision class SwiftFormerModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs") if is_vision_available() else None @slow def test_inference_image_classification_head(self): model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs").to(torch_device) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits expected_shape = torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]]).to(torch_device) torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4)
File: transformers/tests/models/swiftformer/test_modeling_swiftformer.py
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch UniSpeech model.""" import math import unittest import numpy as np import pytest from datasets import load_dataset from transformers import UniSpeechConfig, is_torch_available from transformers.testing_utils import is_flaky, require_soundfile, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( UniSpeechForCTC, UniSpeechForPreTraining, UniSpeechForSequenceClassification, UniSpeechModel, Wav2Vec2FeatureExtractor, Wav2Vec2Processor, ) class UniSpeechModelTester: def __init__( self, parent, batch_size=13, seq_length=1024, # speech is longer is_training=False, hidden_size=16, feat_extract_norm="group", feat_extract_dropout=0.0, feat_extract_activation="gelu", conv_dim=(32, 32, 32), conv_stride=(4, 4, 4), conv_kernel=(8, 8, 8), conv_bias=False, num_conv_pos_embeddings=16, num_conv_pos_embedding_groups=2, num_hidden_layers=2, num_attention_heads=2, hidden_dropout_prob=0.1, # this is most likely not correctly set yet intermediate_size=20, layer_norm_eps=1e-5, hidden_act="gelu", initializer_range=0.02, vocab_size=32, do_stable_layer_norm=False, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.hidden_size = hidden_size self.feat_extract_norm = feat_extract_norm self.feat_extract_dropout = feat_extract_dropout self.feat_extract_activation = feat_extract_activation self.conv_dim = conv_dim self.conv_stride = conv_stride self.conv_kernel = conv_kernel self.conv_bias = conv_bias self.num_conv_pos_embeddings = num_conv_pos_embeddings self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_dropout_prob = hidden_dropout_prob self.intermediate_size = intermediate_size self.layer_norm_eps = layer_norm_eps self.hidden_act = hidden_act self.initializer_range = initializer_range self.vocab_size = vocab_size self.do_stable_layer_norm = do_stable_layer_norm self.scope = scope output_seq_length = self.seq_length for kernel, stride in zip(self.conv_kernel, self.conv_stride): output_seq_length = (output_seq_length - (kernel - 1)) / stride self.output_seq_length = int(math.ceil(output_seq_length)) self.encoder_seq_length = self.output_seq_length def prepare_config_and_inputs(self): input_values = floats_tensor([self.batch_size, self.seq_length], scale=1.0) attention_mask = random_attention_mask([self.batch_size, self.seq_length]) config = self.get_config() return config, input_values, attention_mask def get_config(self): return UniSpeechConfig( hidden_size=self.hidden_size, 
feat_extract_norm=self.feat_extract_norm, feat_extract_dropout=self.feat_extract_dropout, feat_extract_activation=self.feat_extract_activation, conv_dim=self.conv_dim, conv_stride=self.conv_stride, conv_kernel=self.conv_kernel, conv_bias=self.conv_bias, num_conv_pos_embeddings=self.num_conv_pos_embeddings, num_conv_pos_embedding_groups=self.num_conv_pos_embedding_groups, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, hidden_dropout_prob=self.hidden_dropout_prob, intermediate_size=self.intermediate_size, layer_norm_eps=self.layer_norm_eps, hidden_act=self.hidden_act, initializer_range=self.initializer_range, vocab_size=self.vocab_size, ) def create_and_check_model(self, config, input_values, attention_mask): model = UniSpeechModel(config=config) model.to(torch_device) model.eval() result = model(input_values, attention_mask=attention_mask) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.output_seq_length, self.hidden_size) ) def create_and_check_batch_inference(self, config, input_values, *args): # test does not pass for models making use of `group_norm` # check: https://github.com/pytorch/fairseq/issues/3227 model = UniSpeechModel(config=config) model.to(torch_device) model.eval() input_values = input_values[:3] attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.bool) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0.0 batch_outputs = model(input_values, attention_mask=attention_mask).last_hidden_state for i in range(input_values.shape[0]): input_slice = input_values[i : i + 1, : input_lengths[i]] output = model(input_slice).last_hidden_state batch_output = batch_outputs[i : i + 1, : output.shape[1]] self.parent.assertTrue(torch.allclose(output, batch_output, atol=1e-3)) def check_ctc_loss(self, config, input_values, *args): model = UniSpeechForCTC(config=config) model.to(torch_device) # make sure that dropout is disabled model.eval() input_values = input_values[:3] attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_values.shape[0], min(max_length_labels) - 1), model.config.vocab_size) # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0 model.config.ctc_loss_reduction = "sum" sum_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item() model.config.ctc_loss_reduction = "mean" mean_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item() self.parent.assertTrue(isinstance(sum_loss, float)) self.parent.assertTrue(isinstance(mean_loss, float)) def check_seq_classifier_loss(self, config, input_values, *args): model = UniSpeechForSequenceClassification(config=config) model.to(torch_device) # make sure that dropout is disabled model.eval() input_values = input_values[:3] attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label)) # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 attention_mask[i, 
input_lengths[i] :] = 0 masked_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item() unmasked_loss = model(input_values, labels=labels).loss.item() self.parent.assertTrue(isinstance(masked_loss, float)) self.parent.assertTrue(isinstance(unmasked_loss, float)) self.parent.assertTrue(masked_loss != unmasked_loss) def check_ctc_training(self, config, input_values, *args): config.ctc_zero_infinity = True model = UniSpeechForCTC(config=config) model.to(torch_device) model.train() # freeze feature encoder model.freeze_feature_encoder() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size) # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 if max_length_labels[i] < labels.shape[-1]: # it's important that we make sure that target lengths are at least # one shorter than logit lengths to prevent -inf labels[i, max_length_labels[i] - 1 :] = -100 loss = model(input_values, labels=labels).loss self.parent.assertFalse(torch.isinf(loss).item()) loss.backward() def check_seq_classifier_training(self, config, input_values, *args): config.ctc_zero_infinity = True model = UniSpeechForSequenceClassification(config=config) model.to(torch_device) model.train() # freeze everything but the classification head model.freeze_base_model() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label)) # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 loss = model(input_values, labels=labels).loss self.parent.assertFalse(torch.isinf(loss).item()) loss.backward() def check_labels_out_of_vocab(self, config, input_values, *args): model = UniSpeechForCTC(config) model.to(torch_device) model.train() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size + 100) with pytest.raises(ValueError): model(input_values, labels=labels) def prepare_config_and_inputs_for_common(self): config, input_values, attention_mask = self.prepare_config_and_inputs() inputs_dict = {"input_values": input_values, "attention_mask": attention_mask} return config, inputs_dict @require_torch class UniSpeechRobustModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (UniSpeechForCTC, UniSpeechModel, UniSpeechForSequenceClassification, UniSpeechForPreTraining) if is_torch_available() else () ) pipeline_model_mapping = ( { "audio-classification": UniSpeechForSequenceClassification, "automatic-speech-recognition": UniSpeechForCTC, "feature-extraction": UniSpeechModel, } if is_torch_available() else {} ) test_pruning = False test_headmasking = False def setUp(self): self.model_tester = UniSpeechModelTester( self, conv_stride=(3, 3, 3), feat_extract_norm="layer", do_stable_layer_norm=True ) self.config_tester = ConfigTester(self, config_class=UniSpeechConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_model(*config_and_inputs) @is_flaky( description="The `codevector_idx` computed with `argmax()` in `UniSpeechGumbelVectorQuantizer.forward` is not stable." ) def test_batching_equivalence(self): super().test_batching_equivalence() def test_batched_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_batch_inference(*config_and_inputs) def test_ctc_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_loss(*config_and_inputs) def test_seq_classifier_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_seq_classifier_loss(*config_and_inputs) def test_ctc_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_training(*config_and_inputs) def test_seq_classifier_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_seq_classifier_training(*config_and_inputs) def test_labels_out_of_vocab(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_labels_out_of_vocab(*config_and_inputs) # UniSpeech has no inputs_embeds @unittest.skip(reason="UniSpeech has no inputs_embeds") def test_inputs_embeds(self): pass # `input_ids` is renamed to `input_values` @unittest.skip(reason="UniSpeech has no inputs_embeds") def test_forward_signature(self): pass # UniSpeech cannot resize token embeddings # since it has no tokens embeddings @unittest.skip(reason="UniSpeech has no tokens embeds") def test_resize_tokens_embeddings(self): pass @unittest.skip(reason="UniSpeech has no inputs_embeds") def test_model_get_set_embeddings(self): pass def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True # no need to test all models as different heads yield the same functionality model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) # set layer drop to 0 model.config.layerdrop = 0.0 input_values = inputs_dict["input_values"] input_lengths = torch.tensor( [input_values.shape[1] for _ in range(input_values.shape[0])], dtype=torch.long, device=torch_device ) output_lengths = model._get_feat_extract_output_lengths(input_lengths) labels = ids_tensor((input_values.shape[0], output_lengths[0] - 2), self.model_tester.vocab_size) inputs_dict["attention_mask"] = torch.ones_like(inputs_dict["attention_mask"]) inputs_dict["labels"] = labels outputs = model(**inputs_dict) output = outputs[0] # Encoder-/Decoder-only models hidden_states = outputs.hidden_states[0] attentions = outputs.attentions[0] hidden_states.retain_grad() attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(hidden_states.grad) self.assertIsNotNone(attentions.grad) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): uniform_init_parms = [ "conv.weight", "conv.parametrizations.weight", "masked_spec_embed", "codevectors", "quantizer.weight_proj.weight", "project_hid.weight", "project_hid.bias", "project_q.weight", "project_q.bias", "feature_projection.projection.weight", 
"feature_projection.projection.bias", ] if param.requires_grad: if any(x in name for x in uniform_init_parms): self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) # overwrite from test_modeling_common def _mock_init_weights(self, module): if hasattr(module, "weight") and module.weight is not None: module.weight.data.fill_(3) if hasattr(module, "weight_g") and module.weight_g is not None: module.weight_g.data.fill_(3) if hasattr(module, "weight_v") and module.weight_v is not None: module.weight_v.data.fill_(3) if hasattr(module, "bias") and module.bias is not None: module.bias.data.fill_(3) if hasattr(module, "codevectors") and module.codevectors is not None: module.codevectors.data.fill_(3) if hasattr(module, "masked_spec_embed") and module.masked_spec_embed is not None: module.masked_spec_embed.data.fill_(3) def test_mask_feature_prob_ctc(self): model = UniSpeechForCTC.from_pretrained( "hf-internal-testing/tiny-random-unispeech", mask_feature_prob=0.2, mask_feature_length=2 ) model.to(torch_device).train() processor = Wav2Vec2Processor.from_pretrained( "hf-internal-testing/tiny-random-unispeech", return_attention_mask=True ) batch_duration_in_seconds = [1, 3, 2, 6] input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds] batch = processor( input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt" ) logits = model( input_values=batch["input_values"].to(torch_device), attention_mask=batch["attention_mask"].to(torch_device), ).logits self.assertEqual(logits.shape, (4, 1498, 32)) def test_mask_time_prob_ctc(self): model = UniSpeechForCTC.from_pretrained( "hf-internal-testing/tiny-random-unispeech", mask_time_prob=0.2, mask_time_length=2 ) model.to(torch_device).train() processor = Wav2Vec2Processor.from_pretrained( "hf-internal-testing/tiny-random-unispeech", return_attention_mask=True ) batch_duration_in_seconds = [1, 3, 2, 6] input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds] batch = processor( input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt" ) logits = model( input_values=batch["input_values"].to(torch_device), attention_mask=batch["attention_mask"].to(torch_device), ).logits self.assertEqual(logits.shape, (4, 1498, 32)) def test_mask_time_feature_prob_ctc_single_batch(self): model = UniSpeechForCTC.from_pretrained( "hf-internal-testing/tiny-random-unispeech", mask_time_prob=0.2, mask_feature_prob=0.2, mask_time_length=2, mask_feature_length=2, ) model.to(torch_device).train() processor = Wav2Vec2Processor.from_pretrained( "hf-internal-testing/tiny-random-unispeech", return_attention_mask=True ) batch_duration_in_seconds = [6] input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds] batch = processor( input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt" ) logits = model( input_values=batch["input_values"].to(torch_device), attention_mask=batch["attention_mask"].to(torch_device), ).logits self.assertEqual(logits.shape, (1, 1498, 32)) @unittest.skip(reason="Feed forward chunking is not implemented") def test_feed_forward_chunking(self): pass @slow def 
test_model_from_pretrained(self): model = UniSpeechModel.from_pretrained("microsoft/unispeech-large-1500h-cv") self.assertIsNotNone(model) @require_torch @require_soundfile @slow class UniSpeechModelIntegrationTest(unittest.TestCase): def _load_datasamples(self, num_samples): ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") # automatic decoding with librispeech speech_samples = ds.sort("id").filter( lambda x: x["id"] in [f"1272-141231-000{i}" for i in range(num_samples)] )[:num_samples]["audio"] return [x["array"] for x in speech_samples] def _load_superb(self, task, num_samples): ds = load_dataset("anton-l/superb_dummy", task, split="test", trust_remote_code=True) return ds[:num_samples] def test_inference_pretraining(self): model = UniSpeechForPreTraining.from_pretrained("microsoft/unispeech-large-1500h-cv") model.to(torch_device) feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("facebook/wav2vec2-large-xlsr-53") input_speech = self._load_datasamples(2) inputs_dict = feature_extractor(input_speech, return_tensors="pt", padding=True) with torch.no_grad(): torch.manual_seed(0) outputs = model( inputs_dict.input_values.to(torch_device), attention_mask=inputs_dict.attention_mask.to(torch_device), ) # compute cosine similarity cosine_sim = torch.cosine_similarity(outputs.projected_states, outputs.projected_quantized_states, dim=-1) # pretrained model should have learned a high cosine similarity self.assertTrue(cosine_sim.mean() > 0.5) # fmt: off expected_cosine_sim_slice = torch.tensor( [[0.8290, 0.8335, 0.8815, 0.8580, 0.8249], [0.8892, 0.9221, 0.8711, 0.8601, 0.8482]], device=torch_device, ) # fmt: on torch.testing.assert_close(cosine_sim[:, :5], expected_cosine_sim_slice, rtol=1e-3, atol=1e-3)
transformers/tests/models/unispeech/test_modeling_unispeech.py/0
{ "file_path": "transformers/tests/models/unispeech/test_modeling_unispeech.py", "repo_id": "transformers", "token_count": 10589 }
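As a quick illustration of what the UniSpeech CTC tests above exercise, here is a minimal standalone sketch (not part of the test file) that runs a forward pass with the same `hf-internal-testing/tiny-random-unispeech` checkpoint and `Wav2Vec2Processor`; the random waveforms are placeholders, so the logits are meaningless beyond their shape.

import numpy as np
import torch

from transformers import UniSpeechForCTC, Wav2Vec2Processor

# Tiny random checkpoint used by the tests above; outputs are untrained but shapes are real.
processor = Wav2Vec2Processor.from_pretrained(
    "hf-internal-testing/tiny-random-unispeech", return_attention_mask=True
)
model = UniSpeechForCTC.from_pretrained("hf-internal-testing/tiny-random-unispeech")
model.eval()

# Two fake mono waveforms of 1 s and 2 s at 16 kHz, standing in for real audio.
audio = [np.random.random(16_000), np.random.random(32_000)]
inputs = processor(audio, sampling_rate=16_000, padding=True, return_tensors="pt")

with torch.no_grad():
    logits = model(input_values=inputs.input_values, attention_mask=inputs.attention_mask).logits

# Shape: (batch, downsampled_time, vocab_size); CTC decoding would argmax over the last dimension.
print(logits.shape)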
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch ViLT model.""" import unittest from datasets import load_dataset from packaging import version from transformers import ViltConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( ViltForImageAndTextRetrieval, ViltForImagesAndTextClassification, ViltForMaskedLM, ViltForQuestionAnswering, ViltForTokenClassification, ViltModel, ) from transformers.models.auto.modeling_auto import MODEL_MAPPING_NAMES if is_vision_available(): import PIL from PIL import Image from transformers import ViltProcessor class ViltModelTester: def __init__( self, parent, batch_size=13, seq_length=7, image_size=30, patch_size=2, num_channels=3, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, scope=None, modality_type_vocab_size=2, add_multiple_images=False, num_images=-1, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.scope = scope self.modality_type_vocab_size = modality_type_vocab_size self.add_multiple_images = add_multiple_images self.num_images = num_images # we set the expected sequence length (which is used in several tests) # this is equal to the seq length of the text tokens + number of image patches + 1 for the CLS token self.expected_seq_len = self.seq_length + (self.image_size // self.patch_size) ** 2 + 1 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) if 
self.add_multiple_images: pixel_values = floats_tensor([self.batch_size, 2, self.num_channels, self.image_size, self.image_size]) else: pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) if self.use_labels: token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) config = self.get_config() return (config, input_ids, token_type_ids, input_mask, pixel_values, token_labels) def get_config(self): return ViltConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, num_labels=self.num_labels, modality_type_vocab_size=self.modality_type_vocab_size, num_images=self.num_images, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, pixel_values, token_labels, ): model = ViltModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, pixel_values=pixel_values) result = model(input_ids, token_type_ids=token_type_ids, pixel_values=pixel_values) result = model(input_ids, pixel_values=pixel_values) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size) ) def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, pixel_values, token_labels, ): model = ViltForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, pixel_values=pixel_values) result = model(input_ids, token_type_ids=token_type_ids, pixel_values=pixel_values) result = model(input_ids, pixel_values=pixel_values) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, pixel_values, token_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask, "pixel_values": pixel_values, } return config, inputs_dict def prepare_pixel_values(self): return floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) @require_torch class ViltModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( ViltModel, ViltForQuestionAnswering, ViltForImageAndTextRetrieval, ViltForMaskedLM, ViltForTokenClassification, ) if is_torch_available() else () ) pipeline_model_mapping = ( {"image-feature-extraction": ViltModel, "visual-question-answering": ViltForQuestionAnswering} if is_torch_available() else {} ) test_pruning = False test_headmasking = False test_torchscript = False model_split_percents = [0.5, 0.8, 0.9] # 
ViltForMaskedLM, ViltForQuestionAnswering and ViltForImagesAndTextClassification require special treatment def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class.__name__ == "ViltForQuestionAnswering": inputs_dict["labels"] = torch.zeros( self.model_tester.batch_size, self.model_tester.num_labels, device=torch_device ) elif model_class.__name__ in ["ViltForMaskedLM", "ViltForTokenClassification"]: inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device ) elif model_class.__name__ == "ViltForImagesAndTextClassification": inputs_dict["labels"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) return inputs_dict def setUp(self): self.model_tester = ViltModelTester(self) self.config_tester = ConfigTester(self, config_class=ViltConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) def test_training(self): if not self.model_tester.is_training: self.skipTest(reason="model_tester.is_training is set to False.") for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True if model_class.__name__ == "ViltForImagesAndTextClassification": config.modality_type_vocab_size = 3 # ViltForImageAndTextRetrieval doesn't support training for now if model_class.__name__ in [*MODEL_MAPPING_NAMES.values(), "ViltForImageAndTextRetrieval"]: continue model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) for k, v in inputs.items(): print(k, v.shape) loss = model(**inputs).loss loss.backward() def test_training_gradient_checkpointing(self): if not self.model_tester.is_training: self.skipTest(reason="model_tester.is_training is set to False.") for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.use_cache = False config.return_dict = True # ViltForImageAndTextRetrieval doesn't support training for now if ( model_class.__name__ in [*MODEL_MAPPING_NAMES.values(), "ViltForImageAndTextRetrieval"] or not model_class.supports_gradient_checkpointing ): continue model = model_class(config) model.to(torch_device) model.gradient_checkpointing_enable() model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip( reason="""VilT samples image tokens from a multinomial distribution, resulting in not deterministic hidden 
states""" ) def test_save_load(self): pass @unittest.skip( reason="""VilT samples image tokens from a multinomial distribution, resulting in not deterministic hidden states""" ) def test_determinism(self): pass @unittest.skip( "VilT samples image tokens from a multinomial distribution, resulting in not deterministic hidden states" ) def test_batching_equivalence(self): pass @unittest.skip( reason="""VilT samples image tokens from a multinomial distribution, resulting in not deterministic hidden states""" ) def test_model_outputs_equivalence(self): pass @unittest.skip( reason="""VilT samples image tokens from a multinomial distribution, resulting in not deterministic hidden states. Cannot test equivalence on logit level""" ) def test_inputs_embeds_matches_input_ids(self): pass def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_len = getattr(self.model_tester, "expected_seq_len", None) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions if model_class.__name__ == "ViltForImagesAndTextClassification": # attentions are a list of length num_images # each element contains the attentions of a particular image index self.assertEqual(len(attentions), self.model_tester.num_images) self.assertEqual(len(attentions[0]), self.model_tester.num_hidden_layers) else: self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions if model_class.__name__ == "ViltForImagesAndTextClassification": # attentions are a list of length num_images # each element contains the attentions of a particular image index self.assertEqual(len(attentions), self.model_tester.num_images) self.assertEqual(len(attentions[0]), self.model_tester.num_hidden_layers) else: self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) if model_class.__name__ == "ViltForImagesAndTextClassification": self.assertListEqual( list(attentions[0][0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len], ) else: self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len], ) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) self.assertEqual(out_len + 1, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions if model_class.__name__ == "ViltForImagesAndTextClassification": self.assertEqual(len(self_attentions), self.model_tester.num_images) self.assertEqual(len(self_attentions[0]), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0][0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len], ) else: 
self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len], ) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) if model_class.__name__ == "ViltForImagesAndTextClassification": # hidden_states are a list of length num_images # each element contains the hidden states of a particular image index self.assertEqual(len(hidden_states), self.model_tester.num_images) self.assertEqual(len(hidden_states[0]), expected_num_layers) else: self.assertEqual(len(hidden_states), expected_num_layers) seq_length = self.model_tester.expected_seq_len if model_class.__name__ == "ViltForImagesAndTextClassification": self.assertListEqual( list(hidden_states[0][0].shape[-2:]), [seq_length, self.model_tester.hidden_size], ) else: self.assertListEqual( list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: print("Model class:", model_class) inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True # no need to test all models as different heads yield the same functionality model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) inputs = self._prepare_for_class(inputs_dict, model_class) outputs = model(**inputs) output = outputs[0] # Encoder-/Decoder-only models hidden_states = outputs.hidden_states[0] attentions = outputs.attentions[0] if model_class.__name__ == "ViltForImagesAndTextClassification": # hidden_states are a list of length num_images # each element contains the hidden states of a particular image index hidden_states[0].retain_grad() attentions[0].retain_grad() else: hidden_states.retain_grad() attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) if model_class.__name__ == "ViltForImagesAndTextClassification": # hidden_states are a list of length num_images # each element contains the hidden states of a particular image index self.assertIsNotNone(hidden_states[0].grad) self.assertIsNotNone(attentions[0].grad) else: self.assertIsNotNone(hidden_states.grad) self.assertIsNotNone(attentions.grad) @slow def test_model_from_pretrained(self): model_name = "dandelin/vilt-b32-mlm" model = ViltModel.from_pretrained(model_name) self.assertIsNotNone(model) @require_torch class ViltForImagesAndTextClassificationModelTest(ViltModelTest, unittest.TestCase): all_model_classes = (ViltForImagesAndTextClassification,) if is_torch_available() else () def setUp(self): self.model_tester = ViltModelTester(self, modality_type_vocab_size=3, 
add_multiple_images=True, num_images=2) self.config_tester = ConfigTester(self, config_class=ViltConfig, hidden_size=37) @unittest.skip(reason="We only test the model that takes in multiple images") def test_model(self): pass @unittest.skip(reason="We only test the model that takes in multiple images") def test_for_token_classification(self): pass # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision class ViltModelIntegrationTest(unittest.TestCase): @cached_property def default_processor(self): return ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa") if is_vision_available() else None @slow def test_inference_masked_lm(self): model = ViltForMaskedLM.from_pretrained("dandelin/vilt-b32-mlm").to(torch_device) processor = self.default_processor image = prepare_img() text = "a bunch of [MASK] laying on a [MASK]." inputs = processor(image, text, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits expected_shape = torch.Size([1, 11, 30522]) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174]).to(torch_device) torch.testing.assert_close(outputs.logits[0, 0, :3], expected_slice, rtol=1e-4, atol=1e-4) # verify masked token prediction equals "cats" predicted_id = outputs.logits[0, 4, :].argmax(-1).item() assert processor.decode([predicted_id]) == "cats" @slow def test_inference_visual_question_answering(self): model = ViltForQuestionAnswering.from_pretrained("dandelin/vilt-b32-finetuned-vqa").to(torch_device) processor = self.default_processor image = prepare_img() text = "How many cats are there?" inputs = processor(image, text, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits expected_shape = torch.Size((1, 3129)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041]).to(torch_device) torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4) # compute loss vqa_labels = [[2, 3, 155, 800]] vqa_scores = [[1.0, 0.3, 0.3, 0.3]] labels = torch.zeros(1, model.config.num_labels).to(torch_device) for i, (labels_example, scores_example) in enumerate(zip(vqa_labels, vqa_scores)): for l, s in zip(labels_example, scores_example): labels[i, l] = s # forward pass outputs = model(**inputs, labels=labels) # verify we have a positive loss self.assertTrue(outputs.loss > 0) @slow def test_inference_natural_language_visual_reasoning(self): model = ViltForImagesAndTextClassification.from_pretrained("dandelin/vilt-b32-finetuned-nlvr2").to( torch_device ) processor = self.default_processor dataset = load_dataset("hf-internal-testing/fixtures_nlvr2", split="test", trust_remote_code=True) image1 = Image.open(dataset[0]["file"]).convert("RGB") image2 = Image.open(dataset[1]["file"]).convert("RGB") text = ( "The left image contains twice the number of dogs as the right image, and at least two dogs in total are" " standing." 
) encoding_1 = processor(image1, text, return_tensors="pt") encoding_2 = processor(image2, text, return_tensors="pt") pixel_values = torch.stack([encoding_1.pixel_values, encoding_2.pixel_values], dim=1) # forward pass outputs = model( input_ids=encoding_1.input_ids.to(torch_device), pixel_values=pixel_values.to(torch_device), ) # verify the logits expected_shape = torch.Size([1, 2]) self.assertEqual(outputs.logits.shape, expected_shape) is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse("9.0.0") if is_pillow_less_than_9: expected_slice = torch.tensor( [-2.4013, 2.9342], device=torch_device, ) else: expected_slice = torch.tensor( [-2.3713, 2.9168], device=torch_device, ) torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4)
transformers/tests/models/vilt/test_modeling_vilt.py/0
{ "file_path": "transformers/tests/models/vilt/test_modeling_vilt.py", "repo_id": "transformers", "token_count": 12181 }
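The ViLT integration tests above condense to the following inference sketch, assuming network access to the `dandelin/vilt-b32-finetuned-vqa` checkpoint they use; the COCO image URL is only an example and any RGB image works.

import requests
import torch
from PIL import Image

from transformers import ViltForQuestionAnswering, ViltProcessor

processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
model = ViltForQuestionAnswering.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
model.eval()

# Any RGB image works; this COCO URL is only an example.
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

inputs = processor(image, "How many cats are there?", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# The head emits one logit per VQA answer; id2label maps the argmax back to a string.
print(model.config.id2label[logits.argmax(-1).item()])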
# coding=utf-8 # Copyright 2021 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torchvision_available, is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs if is_vision_available(): from transformers import ViTImageProcessor if is_torchvision_available(): from transformers import ViTImageProcessorFast class ViTImageProcessingTester: def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], ): size = size if size is not None else {"height": 18, "width": 18} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std def prepare_image_processor_dict(self): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } def expected_output_image_shape(self, images): return self.num_channels, self.size["height"], self.size["width"] def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision class ViTImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = ViTImageProcessor if is_vision_available() else None fast_image_processing_class = ViTImageProcessorFast if is_torchvision_available() else None def setUp(self): super().setUp() self.image_processor_tester = ViTImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): for image_processing_class in self.image_processor_list: image_processing = image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) def test_image_processor_from_dict_with_kwargs(self): for image_processing_class in self.image_processor_list: image_processor = image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"height": 18, "width": 18}) image_processor = image_processing_class.from_dict(self.image_processor_dict, size=42) 
self.assertEqual(image_processor.size, {"height": 42, "width": 42})
transformers/tests/models/vit/test_image_processing_vit.py/0
{ "file_path": "transformers/tests/models/vit/test_image_processing_vit.py", "repo_id": "transformers", "token_count": 1680 }
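A minimal sketch of the preprocessing path the ViT image-processor tests above cover, using the same 18x18 size and 0.5 mean/std defaults as the tester; the random PIL image is a stand-in for real data.

import numpy as np
from PIL import Image

from transformers import ViTImageProcessor

# Same resize/normalize settings as the tester defaults above.
image_processor = ViTImageProcessor(
    do_resize=True,
    size={"height": 18, "width": 18},
    do_normalize=True,
    image_mean=[0.5, 0.5, 0.5],
    image_std=[0.5, 0.5, 0.5],
)

# A random RGB image standing in for real input.
image = Image.fromarray(np.random.randint(0, 256, (64, 48, 3), dtype=np.uint8))

encoding = image_processor(images=image, return_tensors="pt")
# pixel_values has shape (batch, num_channels, height, width) == (1, 3, 18, 18).
print(encoding["pixel_values"].shape)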
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch VitPose model.""" import inspect import unittest import requests from transformers import VitPoseBackboneConfig, VitPoseConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor if is_torch_available(): import torch from transformers import VitPoseForPoseEstimation if is_vision_available(): from PIL import Image from transformers import VitPoseImageProcessor class VitPoseModelTester: def __init__( self, parent, batch_size=13, image_size=[16 * 8, 12 * 8], patch_size=[8, 8], num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=2, scale_factor=4, out_indices=[-1], scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.scale_factor = scale_factor self.out_indices = out_indices self.scope = scope # in VitPose, the seq length equals the number of patches num_patches = (image_size[0] // patch_size[0]) * (image_size[1] // patch_size[1]) self.seq_length = num_patches def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) config = self.get_config() return config, pixel_values, labels def get_config(self): return VitPoseConfig( backbone_config=self.get_backbone_config(), ) def get_backbone_config(self): return VitPoseBackboneConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_hidden_layers=self.num_hidden_layers, hidden_size=self.hidden_size, intermediate_size=self.intermediate_size, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, out_indices=self.out_indices, ) def create_and_check_for_pose_estimation(self, config, pixel_values, labels): model = VitPoseForPoseEstimation(config) model.to(torch_device) model.eval() result = 
model(pixel_values) expected_height = (self.image_size[0] // self.patch_size[0]) * self.scale_factor expected_width = (self.image_size[1] // self.patch_size[1]) * self.scale_factor self.parent.assertEqual( result.heatmaps.shape, (self.batch_size, self.num_labels, expected_height, expected_width) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, pixel_values, labels, ) = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class VitPoseModelTest(ModelTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as VitPose does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (VitPoseForPoseEstimation,) if is_torch_available() else () fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = VitPoseModelTester(self) self.config_tester = ConfigTester(self, config_class=VitPoseConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() @unittest.skip(reason="VitPose does not support input and output embeddings") def test_model_common_attributes(self): pass @unittest.skip(reason="VitPose does not support input and output embeddings") def test_inputs_embeds(self): pass @unittest.skip(reason="VitPose does not support input and output embeddings") def test_model_get_set_embeddings(self): pass @unittest.skip(reason="VitPose does not support training yet") def test_training(self): pass @unittest.skip(reason="VitPose does not support training yet") def test_training_gradient_checkpointing(self): pass @unittest.skip(reason="VitPose does not support training yet") def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip(reason="VitPose does not support training yet") def test_training_gradient_checkpointing_use_reentrant_false(self): pass def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_for_pose_estimation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pose_estimation(*config_and_inputs) @slow def test_model_from_pretrained(self): model_name = "usyd-community/vitpose-base-simple" model = VitPoseForPoseEstimation.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of people in house def prepare_img(): url = "http://images.cocodataset.org/val2017/000000000139.jpg" image = Image.open(requests.get(url, stream=True).raw) return image @require_torch @require_vision class VitPoseModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return ( VitPoseImageProcessor.from_pretrained("usyd-community/vitpose-base-simple") if 
is_vision_available() else None ) @slow def test_inference_pose_estimation(self): image_processor = self.default_image_processor model = VitPoseForPoseEstimation.from_pretrained("usyd-community/vitpose-base-simple", device_map=torch_device) image = prepare_img() boxes = [[[412.8, 157.61, 53.05, 138.01], [384.43, 172.21, 15.12, 35.74]]] inputs = image_processor(images=image, boxes=boxes, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) heatmaps = outputs.heatmaps assert heatmaps.shape == (2, 17, 64, 48) expected_slice = torch.tensor( [ [9.9330e-06, 9.9330e-06, 9.9330e-06], [9.9330e-06, 9.9330e-06, 9.9330e-06], [9.9330e-06, 9.9330e-06, 9.9330e-06], ] ).to(torch_device) assert torch.allclose(heatmaps[0, 0, :3, :3], expected_slice, atol=1e-4) pose_results = image_processor.post_process_pose_estimation(outputs, boxes=boxes)[0] expected_bbox = torch.tensor([391.9900, 190.0800, 391.1575, 189.3034]) expected_keypoints = torch.tensor( [ [3.9813e02, 1.8184e02], [3.9828e02, 1.7981e02], [3.9596e02, 1.7948e02], ] ) expected_scores = torch.tensor([8.7529e-01, 8.4315e-01, 9.2678e-01]) self.assertEqual(len(pose_results), 2) torch.testing.assert_close(pose_results[1]["bbox"].cpu(), expected_bbox, rtol=1e-4, atol=1e-4) torch.testing.assert_close(pose_results[1]["keypoints"][:3].cpu(), expected_keypoints, rtol=1e-2, atol=1e-2) torch.testing.assert_close(pose_results[1]["scores"][:3].cpu(), expected_scores, rtol=1e-4, atol=1e-4) @slow def test_batched_inference(self): image_processor = self.default_image_processor model = VitPoseForPoseEstimation.from_pretrained("usyd-community/vitpose-base-simple", device_map=torch_device) image = prepare_img() boxes = [ [[412.8, 157.61, 53.05, 138.01], [384.43, 172.21, 15.12, 35.74]], [[412.8, 157.61, 53.05, 138.01], [384.43, 172.21, 15.12, 35.74]], ] inputs = image_processor(images=[image, image], boxes=boxes, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) heatmaps = outputs.heatmaps assert heatmaps.shape == (4, 17, 64, 48) expected_slice = torch.tensor( [ [9.9330e-06, 9.9330e-06, 9.9330e-06], [9.9330e-06, 9.9330e-06, 9.9330e-06], [9.9330e-06, 9.9330e-06, 9.9330e-06], ] ).to(torch_device) assert torch.allclose(heatmaps[0, 0, :3, :3], expected_slice, atol=1e-4) pose_results = image_processor.post_process_pose_estimation(outputs, boxes=boxes) print(pose_results) expected_bbox = torch.tensor([391.9900, 190.0800, 391.1575, 189.3034]) expected_keypoints = torch.tensor( [ [3.9813e02, 1.8184e02], [3.9828e02, 1.7981e02], [3.9596e02, 1.7948e02], ] ) expected_scores = torch.tensor([8.7529e-01, 8.4315e-01, 9.2678e-01]) self.assertEqual(len(pose_results), 2) self.assertEqual(len(pose_results[0]), 2) torch.testing.assert_close(pose_results[0][1]["bbox"].cpu(), expected_bbox, rtol=1e-4, atol=1e-4) torch.testing.assert_close(pose_results[0][1]["keypoints"][:3].cpu(), expected_keypoints, rtol=1e-2, atol=1e-2) torch.testing.assert_close(pose_results[0][1]["scores"][:3].cpu(), expected_scores, rtol=1e-4, atol=1e-4)
transformers/tests/models/vitpose/test_modeling_vitpose.py/0
{ "file_path": "transformers/tests/models/vitpose/test_modeling_vitpose.py", "repo_id": "transformers", "token_count": 5394 }
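A condensed version of the VitPose integration test above, assuming the same `usyd-community/vitpose-base-simple` checkpoint and a hand-supplied person box in COCO (x, y, width, height) format; in practice the boxes typically come from a separate person detector.

import requests
import torch
from PIL import Image

from transformers import VitPoseForPoseEstimation, VitPoseImageProcessor

image_processor = VitPoseImageProcessor.from_pretrained("usyd-community/vitpose-base-simple")
model = VitPoseForPoseEstimation.from_pretrained("usyd-community/vitpose-base-simple")
model.eval()

url = "http://images.cocodataset.org/val2017/000000000139.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# One person box for the single image, in COCO (x, y, width, height) format as in the test above.
boxes = [[[412.8, 157.61, 53.05, 138.01]]]
inputs = image_processor(images=image, boxes=boxes, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

# Heatmaps are (num_boxes, num_keypoints, height, width); post-processing maps them to keypoints per box.
pose_results = image_processor.post_process_pose_estimation(outputs, boxes=boxes)[0]
print(pose_results[0]["keypoints"].shape, pose_results[0]["scores"].shape)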
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import shutil import tempfile import unittest import pytest from transformers import WhisperTokenizer, is_speech_available from transformers.testing_utils import require_sentencepiece, require_torch, require_torchaudio from .test_feature_extraction_whisper import floats_list if is_speech_available(): from transformers import WhisperFeatureExtractor, WhisperProcessor TRANSCRIBE = 50358 NOTIMESTAMPS = 50362 @require_torch @require_torchaudio @require_sentencepiece class WhisperProcessorTest(unittest.TestCase): def setUp(self): self.checkpoint = "openai/whisper-small.en" self.tmpdirname = tempfile.mkdtemp() def get_tokenizer(self, **kwargs): return WhisperTokenizer.from_pretrained(self.checkpoint, **kwargs) def get_feature_extractor(self, **kwargs): return WhisperFeatureExtractor.from_pretrained(self.checkpoint, **kwargs) def tearDown(self): shutil.rmtree(self.tmpdirname) def test_save_load_pretrained_default(self): tokenizer = self.get_tokenizer() feature_extractor = self.get_feature_extractor() processor = WhisperProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) processor.save_pretrained(self.tmpdirname) processor = WhisperProcessor.from_pretrained(self.tmpdirname) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab()) self.assertIsInstance(processor.tokenizer, WhisperTokenizer) self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string()) self.assertIsInstance(processor.feature_extractor, WhisperFeatureExtractor) def test_save_load_pretrained_additional_features(self): processor = WhisperProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor()) processor.save_pretrained(self.tmpdirname) tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)") feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0) processor = WhisperProcessor.from_pretrained( self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer, WhisperTokenizer) self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string()) self.assertIsInstance(processor.feature_extractor, WhisperFeatureExtractor) def test_feature_extractor(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = WhisperProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) raw_speech = floats_list((3, 1000)) input_feat_extract = feature_extractor(raw_speech, return_tensors="np") input_processor = processor(raw_speech, return_tensors="np") for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2) def test_tokenizer(self): feature_extractor = self.get_feature_extractor() 
tokenizer = self.get_tokenizer() processor = WhisperProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) input_str = "This is a test string" encoded_processor = processor(text=input_str) encoded_tok = tokenizer(input_str) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key], encoded_processor[key]) def test_tokenizer_decode(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = WhisperProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] decoded_processor = processor.batch_decode(predicted_ids) decoded_tok = tokenizer.batch_decode(predicted_ids) self.assertListEqual(decoded_tok, decoded_processor) def test_model_input_names(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = WhisperProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) self.assertListEqual( processor.model_input_names, feature_extractor.model_input_names, msg="`processor` and `feature_extractor` model input names do not match", ) def test_get_decoder_prompt_ids(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = WhisperProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) forced_decoder_ids = processor.get_decoder_prompt_ids(task="transcribe", no_timestamps=True) self.assertIsInstance(forced_decoder_ids, list) for ids in forced_decoder_ids: self.assertIsInstance(ids, (list, tuple)) expected_ids = [TRANSCRIBE, NOTIMESTAMPS] self.assertListEqual([ids[-1] for ids in forced_decoder_ids], expected_ids) def test_get_prompt_ids(self): processor = WhisperProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor()) prompt_ids = processor.get_prompt_ids("Mr. Quilter") decoded_prompt = processor.tokenizer.decode(prompt_ids) self.assertListEqual(prompt_ids.tolist(), [50360, 1770, 13, 2264, 346, 353]) self.assertEqual(decoded_prompt, "<|startofprev|> Mr. Quilter") def test_empty_get_prompt_ids(self): processor = WhisperProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor()) prompt_ids = processor.get_prompt_ids("") decoded_prompt = processor.tokenizer.decode(prompt_ids) self.assertListEqual(prompt_ids.tolist(), [50360, 220]) self.assertEqual(decoded_prompt, "<|startofprev|> ") def test_get_prompt_ids_with_special_tokens(self): processor = WhisperProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor()) def _test_prompt_error_raised_helper(prompt, special_token): with pytest.raises(ValueError) as excinfo: processor.get_prompt_ids(prompt) expected = f"Encountered text in the prompt corresponding to disallowed special token: {special_token}." self.assertEqual(expected, str(excinfo.value)) _test_prompt_error_raised_helper("<|startofprev|> test", "<|startofprev|>") _test_prompt_error_raised_helper("test <|notimestamps|>", "<|notimestamps|>") _test_prompt_error_raised_helper("test <|zh|> test <|transcribe|>", "<|zh|>")
transformers/tests/models/whisper/test_processor_whisper.py/0
{ "file_path": "transformers/tests/models/whisper/test_processor_whisper.py", "repo_id": "transformers", "token_count": 2869 }
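A small sketch of the prompt and forced-decoder-ID helpers the Whisper processor tests above exercise, assuming access to the same `openai/whisper-small.en` checkpoint.

from transformers import WhisperProcessor

processor = WhisperProcessor.from_pretrained("openai/whisper-small.en")

# Forced decoder ids pin the task and timestamp behaviour at generation time.
forced_decoder_ids = processor.get_decoder_prompt_ids(task="transcribe", no_timestamps=True)
print(forced_decoder_ids)  # list of (position, token_id) pairs ending in the transcribe / no-timestamps ids

# Prompt ids prepend free-form context after the <|startofprev|> token.
prompt_ids = processor.get_prompt_ids("Mr. Quilter")
print(processor.tokenizer.decode(prompt_ids))  # "<|startofprev|> Mr. Quilter"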
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import is_torch_available from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, require_torch_sdpa, slow, ) if is_torch_available(): import torch from transformers import XLMRobertaModel @require_sentencepiece @require_tokenizers @require_torch class XLMRobertaModelIntegrationTest(unittest.TestCase): @slow def test_xlm_roberta_base(self): model = XLMRobertaModel.from_pretrained("FacebookAI/xlm-roberta-base", attn_implementation="eager") input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]]) # The dog is cute and lives in the garden house expected_output_shape = torch.Size((1, 12, 768)) # batch_size, sequence_length, embedding_vector_dim expected_output_values_last_dim = torch.tensor( [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): output = model(input_ids)["last_hidden_state"].detach() self.assertEqual(output.shape, expected_output_shape) # compare the actual values for a slice of last dim torch.testing.assert_close(output[:, :, -1], expected_output_values_last_dim, rtol=1e-3, atol=1e-3) @require_torch_sdpa def test_xlm_roberta_base_sdpa(self): input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]]) # The dog is cute and lives in the garden house expected_output_shape = torch.Size((1, 12, 768)) # batch_size, sequence_length, embedding_vector_dim expected_output_values_last_dim = torch.tensor( [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] ) model = XLMRobertaModel.from_pretrained("FacebookAI/xlm-roberta-base", attn_implementation="sdpa") with torch.no_grad(): output = model(input_ids)["last_hidden_state"].detach() self.assertEqual(output.shape, expected_output_shape) # compare the actual values for a slice of last dim torch.testing.assert_close(output[:, :, -1], expected_output_values_last_dim, rtol=1e-3, atol=1e-3) @slow def test_xlm_roberta_large(self): model = XLMRobertaModel.from_pretrained("FacebookAI/xlm-roberta-large") input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]]) # The dog is cute and lives in the garden house expected_output_shape = torch.Size((1, 12, 1024)) # batch_size, sequence_length, embedding_vector_dim expected_output_values_last_dim = torch.tensor( [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): output = model(input_ids)["last_hidden_state"].detach() 
self.assertEqual(output.shape, expected_output_shape) # compare the actual values for a slice of last dim torch.testing.assert_close(output[:, :, -1], expected_output_values_last_dim, rtol=1e-3, atol=1e-3)
transformers/tests/models/xlm_roberta/test_modeling_xlm_roberta.py/0
{ "file_path": "transformers/tests/models/xlm_roberta/test_modeling_xlm_roberta.py", "repo_id": "transformers", "token_count": 1771 }
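A standalone sketch of the feature-extraction path the XLM-RoBERTa integration tests above check numerically; `AutoTokenizer` is used here for convenience (the test file hard-codes the token ids instead), and the exact sequence length depends on the tokenizer output.

import torch

from transformers import AutoTokenizer, XLMRobertaModel

tokenizer = AutoTokenizer.from_pretrained("FacebookAI/xlm-roberta-base")
model = XLMRobertaModel.from_pretrained("FacebookAI/xlm-roberta-base")
model.eval()

# The same sentence the tests above encode by hand as explicit token ids.
inputs = tokenizer("The dog is cute and lives in the garden house", return_tensors="pt")

with torch.no_grad():
    last_hidden_state = model(**inputs).last_hidden_state

# (batch_size, sequence_length, 768) for the base checkpoint.
print(last_hidden_state.shape)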
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Zamba model.""" import math import tempfile import unittest import pytest from transformers import AutoTokenizer, ZambaConfig, is_torch_available from transformers.testing_utils import ( require_bitsandbytes, require_flash_attn, require_torch, require_torch_gpu, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( ZambaForCausalLM, ZambaForSequenceClassification, ZambaModel, ) from transformers.models.zamba.modeling_zamba import ( ZambaHybridDynamicCache, ) class ZambaModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=64, mamba_dt_rank=32, num_hidden_layers=5, attn_layer_offset=1, attn_layer_period=8, num_attention_heads=4, num_key_value_heads=4, n_mamba_heads=2, intermediate_size=37, hidden_act="gelu", hidden_mamba_act="silu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.mamba_dt_rank = mamba_dt_rank self.num_hidden_layers = num_hidden_layers self.attn_layer_offset = attn_layer_offset self.attn_layer_period = attn_layer_period self.num_attention_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads self.n_mamba_heads = n_mamba_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_mamba_act = hidden_mamba_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], 
self.num_choices) config = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): return ZambaConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, mamba_dt_rank=self.mamba_dt_rank, num_hidden_layers=self.num_hidden_layers, attn_layer_offset=self.attn_layer_offset, attn_layer_period=self.attn_layer_period, num_attention_heads=self.num_attention_heads, num_key_value_heads=self.num_key_value_heads, n_mamba_heads=self.n_mamba_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_mamba_act=self.hidden_mamba_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=True, initializer_range=self.initializer_range, use_mamba_kernels=False, ) def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = self.prepare_config_and_inputs() config.is_decoder = True return ( config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, ) def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels): model = ZambaModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_causal_lm( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, ): model = ZambaForCausalLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=token_labels) result = model(input_ids, attention_mask=input_mask) result = model(input_ids, labels=token_labels) result = model(input_ids) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, ): config.is_decoder = True config.add_cross_attention = True model = ZambaForCausalLM(config=config) model.to(torch_device) model.eval() # first forward pass # Attention: Zamba needs the cache to be initialized to return a cache! 
past_key_values = ZambaHybridDynamicCache(config, input_ids.shape[0], model.dtype, device=model.device) outputs = model( input_ids, attention_mask=input_mask, past_key_values=past_key_values, use_cache=True, ) past_key_values = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model( next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True, )["hidden_states"][0] output_from_past = model( next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values, output_hidden_states=True, cache_position=torch.arange( input_ids.shape[1], input_ids.shape[1] + next_tokens.shape[1], device=model.device ), )["hidden_states"][0] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_for_sequence_classification( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = ZambaForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class ZambaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( ZambaModel, ZambaForCausalLM, ZambaForSequenceClassification, ) if is_torch_available() else () ) all_generative_model_classes = (ZambaForCausalLM,) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": ZambaModel, "text-classification": ZambaForSequenceClassification, "text-generation": ZambaForCausalLM, "zero-shot": ZambaForSequenceClassification, } if is_torch_available() else {} ) test_headmasking = False test_pruning = False def setUp(self): self.model_tester = ZambaModelTester(self) self.config_tester = ConfigTester(self, config_class=ZambaConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_casual_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def 
test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_initialization(self): r""" Overriding the test_initialization test as the A_log and D params of the Mamba block are initialized differently """ config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: if "A_log" in name: A = torch.arange(1, config.mamba_d_state + 1, dtype=torch.float32)[None, :] intermediate_dim = config.mamba_expand * config.hidden_size A = A.expand(intermediate_dim, -1).reshape( config.n_mamba_heads, intermediate_dim // config.n_mamba_heads, -1 ) torch.testing.assert_close(param.data, torch.log(A), rtol=1e-5, atol=1e-5) elif "D" in name: # check if it's a ones like torch.testing.assert_close(param.data, torch.ones_like(param.data), rtol=1e-5, atol=1e-5) elif "x_proj" in name or "dt_proj_weight" in name: self.assertIn( ((param.data.mean() * 1e2).round() / 1e2).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized (raw value {param.data.mean()})", ) elif "dt_proj_bias" in name: dt = torch.exp( torch.tensor([0, 1]) * (math.log(config.time_step_max) - math.log(config.time_step_min)) + math.log(config.time_step_min) ).clamp(min=config.time_step_floor) inv_dt = dt + torch.log(-torch.expm1(-dt)) if param.requires_grad: self.assertTrue(param.data.max().item() <= inv_dt[1]) self.assertTrue(param.data.min().item() >= inv_dt[0]) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def test_mismatched_shapes_have_properly_initialized_weights(self): r""" Overriding the test_mismatched_shapes_have_properly_initialized_weights test because A_log and D params of the Mamba block are initialized differently and we tested that in test_initialization """ self.skipTest("Cumbersome and redundant for Zamba") def test_attention_outputs(self): r""" Overriding the test_attention_outputs test as the Zamba model outputs attention only for its attention layers """ config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_len = getattr(self.model_tester, "seq_length", None) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) expected_num_attentions = ( math.ceil( (self.model_tester.num_hidden_layers - self.model_tester.attn_layer_offset) / self.model_tester.attn_layer_period ) + 1 ) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), expected_num_attentions) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) 
attentions = outputs.attentions self.assertEqual(len(attentions), expected_num_attentions) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.attentions self.assertEqual(len(self_attentions), expected_num_attentions) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) def _get_input_ids_and_config(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() ( config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs return config, input_ids, input_mask def test_left_padding_compatibility(self): r""" Overriding the test_left_padding_compatibility test as the mamba layers accentuate the numerical differences effect of the left padding discussed in the issue in the note. Using a more permissive tolerance value. """ import inspect # NOTE: left-padding results in small numerical differences. This is expected. # See https://github.com/huggingface/transformers/issues/25420#issuecomment-1775317535 # First, filter out models that don't support left padding - generative and decoder-only. # Zamba is a decoder-only architecture decoder_only_classes = self.all_generative_model_classes # Then, test left-padding def _prepare_model_kwargs(input_ids, attention_mask, signature): model_kwargs = {"input_ids": input_ids, "attention_mask": attention_mask} if "position_ids" in signature: position_ids = torch.cumsum(attention_mask, dim=-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) model_kwargs["position_ids"] = position_ids if "cache_position" in signature: cache_position = torch.arange(input_ids.shape[-1], device=torch_device) model_kwargs["cache_position"] = cache_position return model_kwargs for model_class in decoder_only_classes: config, input_ids, attention_mask = self._get_input_ids_and_config() model = model_class(config).to(torch_device).eval() signature = inspect.signature(model.forward).parameters.keys() # Without padding model_kwargs = _prepare_model_kwargs(input_ids, attention_mask, signature) next_logits_wo_padding = model(**model_kwargs).logits[:, -1, :] # With left-padding (length 32) pad_size = (input_ids.shape[0], 32) padding = torch.ones(pad_size, dtype=input_ids.dtype, device=torch_device) * config.pad_token_id padded_input_ids = torch.cat((padding, input_ids), dim=1) padded_attention_mask = torch.cat((torch.zeros_like(padding), attention_mask), dim=1) model_kwargs = _prepare_model_kwargs(padded_input_ids, padded_attention_mask, signature) next_logits_with_padding = model(**model_kwargs).logits[:, -1, :] # They should result in very similar logits torch.testing.assert_close(next_logits_wo_padding, next_logits_with_padding, rtol=3e-3, atol=3e-3) @require_flash_attn @require_torch_gpu @require_bitsandbytes @pytest.mark.flash_attn_test @slow def test_flash_attn_2_fp32_ln(self): r""" Overriding the test_flash_attn_2_fp32_ln test as the Zamba model, like Mixtral, doesn't support right padding + use cache with FA2 """ for model_class in 
self.all_generative_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) dummy_input = inputs_dict[model.main_input_name] dummy_attention_mask = inputs_dict.get("attention_mask", torch.ones_like(dummy_input)) # NOTE: Zamba does not support right padding + use_cache with FA2. dummy_attention_mask[:, -1] = 1 model = model_class.from_pretrained( tmpdirname, torch_dtype=torch.float16, attn_implementation="flash_attention_2", low_cpu_mem_usage=True, load_in_4bit=True, ) for _, param in model.named_parameters(): # upcast only layer norms if (param.dtype == torch.float16) or (param.dtype == torch.bfloat16): param.data = param.data.to(torch.float32) _ = model(dummy_input) # with attention mask _ = model(dummy_input, attention_mask=dummy_attention_mask) @require_flash_attn @require_torch_gpu @pytest.mark.flash_attn_test @slow def test_flash_attn_2_inference_equivalence_right_padding(self): r""" Overriding the test_flash_attn_2_inference_padding_right test as the Zamba model, like Mixtral, doesn't support right padding + use cache with FA2 """ self.skipTest(reason="Zamba flash attention does not support right padding") @require_torch class ZambaModelIntegrationTest(unittest.TestCase): model = None tokenizer = None @classmethod @slow def setUpClass(cls): model_id = "Zyphra/Zamba-7B-v1" cls.model = ZambaForCausalLM.from_pretrained( model_id, torch_dtype=torch.bfloat16, low_cpu_mem_usage=True, use_mamba_kernels=False ) cls.tokenizer = AutoTokenizer.from_pretrained(model_id) @slow def test_simple_generate(self): self.model.to(torch_device) input_ids = self.tokenizer("Hey how are you doing on this lovely evening?", return_tensors="pt")[ "input_ids" ].to(torch_device) out = self.model.generate(input_ids, do_sample=False, max_new_tokens=10) output_sentence = self.tokenizer.decode(out[0, :]) self.assertEqual( output_sentence, "<s> Hey how are you doing on this lovely evening? I hope you are all doing well. I am", ) with torch.no_grad(): logits = self.model(input_ids=input_ids).logits EXPECTED_LOGITS_NO_GRAD = torch.tensor( [ -7.9375, 8.1875, 1.3984, -6.0000, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375, 2.7500, 13.0625, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375 ] , dtype=torch.float32) # fmt: skip torch.testing.assert_close(logits[0, -1, :40].cpu(), EXPECTED_LOGITS_NO_GRAD, rtol=1e-3, atol=1e-3) @slow def test_simple_batched_generate_with_padding(self): self.model.to(torch_device) self.tokenizer.add_special_tokens({"pad_token": "[PAD]"}) self.model.resize_token_embeddings(len(self.tokenizer)) inputs = self.tokenizer( ["Hey how are you doing on this lovely evening?", "Tell me a story"], padding=True, return_tensors="pt" ).to(torch_device) out = self.model.generate(**inputs, do_sample=False, max_new_tokens=10) output_sentences = self.tokenizer.batch_decode(out) self.assertEqual( output_sentences[0], "<s> Hey how are you doing on this lovely evening? I hope you are all doing well. 
I am", ) self.assertEqual( output_sentences[1], "[PAD][PAD][PAD][PAD][PAD][PAD]<s> Tell me a story about a time when you were in a difficult situation", ) with torch.no_grad(): logits = self.model(input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"]).logits EXPECTED_LOGITS_NO_GRAD_0 = torch.tensor( [ -7.9375, 8.1250, 1.3594, -6.0000, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375, 2.7344, 13.0625, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375, -7.9375 ] , dtype=torch.float32) # fmt: skip EXPECTED_LOGITS_NO_GRAD_1 = torch.tensor( [ -6.3750, 3.4219, 0.6719, -5.0312, -8.5000, -8.5000, -8.5000, -8.5000, -8.5000, -8.5000, -8.5000, -8.5000, 2.0625, 10.3750, -8.5000, -8.5000, -8.5000, -8.5000, -8.5000, -8.5000, -8.5000, -8.5000, -8.5000, -8.5000, -8.5000, -8.5000, -8.5000, -8.5000, -8.5000, -8.5000, -8.5000, -8.5000, -8.5000, -8.5000, -8.5000, -8.5000, -8.5000, -8.5000, -8.5000, -8.5000 ] , dtype=torch.float32) # fmt: skip torch.testing.assert_close(logits[0, -1, :40].cpu(), EXPECTED_LOGITS_NO_GRAD_0, rtol=1e-3, atol=1e-3) torch.testing.assert_close(logits[1, -1, :40].cpu(), EXPECTED_LOGITS_NO_GRAD_1, rtol=1e-3, atol=1e-3)
transformers/tests/models/zamba/test_modeling_zamba.py/0
{ "file_path": "transformers/tests/models/zamba/test_modeling_zamba.py", "repo_id": "transformers", "token_count": 12980 }
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from transformers import ( MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING, AutoProcessor, TextToAudioPipeline, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, require_torch, require_torch_accelerator, require_torch_or_tf, slow, torch_device, ) from transformers.trainer_utils import set_seed from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf class TextToAudioPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING # for now only test text_to_waveform and not text_to_spectrogram @slow @require_torch def test_small_musicgen_pt(self): music_generator = pipeline(task="text-to-audio", model="facebook/musicgen-small", framework="pt") forward_params = { "do_sample": False, "max_new_tokens": 250, } outputs = music_generator("This is a test", forward_params=forward_params) self.assertEqual({"audio": ANY(np.ndarray), "sampling_rate": 32000}, outputs) # test two examples side-by-side outputs = music_generator(["This is a test", "This is a second test"], forward_params=forward_params) audio = [output["audio"] for output in outputs] self.assertEqual([ANY(np.ndarray), ANY(np.ndarray)], audio) # test batching outputs = music_generator( ["This is a test", "This is a second test"], forward_params=forward_params, batch_size=2 ) audio = [output["audio"] for output in outputs] self.assertEqual([ANY(np.ndarray), ANY(np.ndarray)], audio) @slow @require_torch def test_medium_seamless_m4t_pt(self): speech_generator = pipeline(task="text-to-audio", model="facebook/hf-seamless-m4t-medium", framework="pt") for forward_params in [{"tgt_lang": "eng"}, {"return_intermediate_token_ids": True, "tgt_lang": "eng"}]: outputs = speech_generator("This is a test", forward_params=forward_params) self.assertEqual({"audio": ANY(np.ndarray), "sampling_rate": 16000}, outputs) # test two examples side-by-side outputs = speech_generator(["This is a test", "This is a second test"], forward_params=forward_params) audio = [output["audio"] for output in outputs] self.assertEqual([ANY(np.ndarray), ANY(np.ndarray)], audio) # test batching outputs = speech_generator( ["This is a test", "This is a second test"], forward_params=forward_params, batch_size=2 ) audio = [output["audio"] for output in outputs] self.assertEqual([ANY(np.ndarray), ANY(np.ndarray)], audio) @slow @require_torch def test_small_bark_pt(self): speech_generator = pipeline(task="text-to-audio", model="suno/bark-small", framework="pt") forward_params = { # Using `do_sample=False` to force deterministic output "do_sample": False, "semantic_max_new_tokens": 100, } outputs = speech_generator("This is a test", forward_params=forward_params) self.assertEqual( {"audio": ANY(np.ndarray), "sampling_rate": 24000}, outputs, ) # test two examples side-by-side outputs = speech_generator( ["This is a test", "This is a second test"], forward_params=forward_params, ) audio = [output["audio"] for 
output in outputs] self.assertEqual([ANY(np.ndarray), ANY(np.ndarray)], audio) # test other generation strategy forward_params = { "do_sample": True, "semantic_max_new_tokens": 100, "semantic_num_return_sequences": 2, } outputs = speech_generator("This is a test", forward_params=forward_params) audio = outputs["audio"] self.assertEqual(ANY(np.ndarray), audio) # test using a speaker embedding processor = AutoProcessor.from_pretrained("suno/bark-small") temp_inp = processor("hey, how are you?", voice_preset="v2/en_speaker_5") history_prompt = temp_inp["history_prompt"] forward_params["history_prompt"] = history_prompt outputs = speech_generator( ["This is a test", "This is a second test"], forward_params=forward_params, batch_size=2, ) audio = [output["audio"] for output in outputs] self.assertEqual([ANY(np.ndarray), ANY(np.ndarray)], audio) @slow @require_torch_accelerator def test_conversion_additional_tensor(self): speech_generator = pipeline(task="text-to-audio", model="suno/bark-small", framework="pt", device=torch_device) processor = AutoProcessor.from_pretrained("suno/bark-small") forward_params = { "do_sample": True, "semantic_max_new_tokens": 100, } # atm, must do to stay coherent with BarkProcessor preprocess_params = { "max_length": 256, "add_special_tokens": False, "return_attention_mask": True, "return_token_type_ids": False, "padding": "max_length", } outputs = speech_generator( "This is a test", forward_params=forward_params, preprocess_params=preprocess_params, ) temp_inp = processor("hey, how are you?", voice_preset="v2/en_speaker_5") history_prompt = temp_inp["history_prompt"] forward_params["history_prompt"] = history_prompt # history_prompt is a torch.Tensor passed as a forward_param # if generation is successful, it means that it was passed to the right device outputs = speech_generator( "This is a test", forward_params=forward_params, preprocess_params=preprocess_params ) self.assertEqual( {"audio": ANY(np.ndarray), "sampling_rate": 24000}, outputs, ) @slow @require_torch def test_vits_model_pt(self): speech_generator = pipeline(task="text-to-audio", model="facebook/mms-tts-eng", framework="pt") outputs = speech_generator("This is a test") self.assertEqual(outputs["sampling_rate"], 16000) audio = outputs["audio"] self.assertEqual(ANY(np.ndarray), audio) # test two examples side-by-side outputs = speech_generator(["This is a test", "This is a second test"]) audio = [output["audio"] for output in outputs] self.assertEqual([ANY(np.ndarray), ANY(np.ndarray)], audio) # test batching outputs = speech_generator(["This is a test", "This is a second test"], batch_size=2) self.assertEqual(ANY(np.ndarray), outputs[0]["audio"]) @slow @require_torch def test_forward_model_kwargs(self): # use vits - a forward model speech_generator = pipeline(task="text-to-audio", model="kakao-enterprise/vits-vctk", framework="pt") # for reproducibility set_seed(555) outputs = speech_generator("This is a test", forward_params={"speaker_id": 5}) audio = outputs["audio"] with self.assertRaises(TypeError): # assert error if generate parameter outputs = speech_generator("This is a test", forward_params={"speaker_id": 5, "do_sample": True}) forward_params = {"speaker_id": 5} generate_kwargs = {"do_sample": True} with self.assertRaises(ValueError): # assert error if generate_kwargs with forward-only models outputs = speech_generator( "This is a test", forward_params=forward_params, generate_kwargs=generate_kwargs ) self.assertTrue(np.abs(outputs["audio"] - audio).max() < 1e-5) @slow @require_torch def 
test_generative_model_kwargs(self): # use musicgen - a generative model music_generator = pipeline(task="text-to-audio", model="facebook/musicgen-small", framework="pt") forward_params = { "do_sample": True, "max_new_tokens": 250, } # for reproducibility set_seed(555) outputs = music_generator("This is a test", forward_params=forward_params) audio = outputs["audio"] self.assertEqual(ANY(np.ndarray), audio) # make sure generate kwargs get priority over forward params forward_params = { "do_sample": False, "max_new_tokens": 250, } generate_kwargs = {"do_sample": True} # for reproducibility set_seed(555) outputs = music_generator("This is a test", forward_params=forward_params, generate_kwargs=generate_kwargs) self.assertListEqual(outputs["audio"].tolist(), audio.tolist()) def get_test_pipeline( self, model, tokenizer=None, image_processor=None, feature_extractor=None, processor=None, torch_dtype="float32", ): speech_generator = TextToAudioPipeline( model=model, tokenizer=tokenizer, feature_extractor=feature_extractor, image_processor=image_processor, processor=processor, torch_dtype=torch_dtype, ) return speech_generator, ["This is a test", "Another test"] def run_pipeline_test(self, speech_generator, _): outputs = speech_generator("This is a test") self.assertEqual(ANY(np.ndarray), outputs["audio"]) forward_params = ( {"num_return_sequences": 2, "do_sample": True} if speech_generator.model.can_generate() else {} ) outputs = speech_generator(["This is great !", "Something else"], forward_params=forward_params) audio = [output["audio"] for output in outputs] self.assertEqual([ANY(np.ndarray), ANY(np.ndarray)], audio)
transformers/tests/pipelines/test_pipelines_text_to_audio.py/0
{ "file_path": "transformers/tests/pipelines/test_pipelines_text_to_audio.py", "repo_id": "transformers", "token_count": 4480 }
# coding=utf-8 # Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import tempfile import unittest from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HiggsConfig, OPTForCausalLM from transformers.testing_utils import ( require_accelerate, require_flute_hadamard, require_torch_gpu, require_torch_multi_gpu, slow, torch_device, ) from transformers.utils import is_accelerate_available, is_torch_available if is_torch_available(): import torch if is_accelerate_available(): from accelerate import init_empty_weights @require_torch_gpu class HiggsConfigTest(unittest.TestCase): def test_to_dict(self): """ Simple test that checks if one uses a config and converts it to a dict, the dict is the same as the config object """ quantization_config = HiggsConfig() config_to_dict = quantization_config.to_dict() for key in config_to_dict: self.assertEqual(getattr(quantization_config, key), config_to_dict[key]) def test_from_dict(self): """ Simple test that checks if one uses a dict and converts it to a config object, the config object is the same as the dict """ dict = {"modules_to_not_convert": ["embed_tokens", "lm_head"], "quant_method": "higgs"} quantization_config = HiggsConfig.from_dict(dict) self.assertEqual(dict["modules_to_not_convert"], quantization_config.modules_to_not_convert) self.assertEqual(dict["quant_method"], quantization_config.quant_method) @slow @require_torch_gpu @require_flute_hadamard @require_accelerate # @require_read_token class HiggsTest(unittest.TestCase): model_name = "meta-llama/Meta-Llama-3.1-8B" input_text = "A quick brown fox jumps over the" max_new_tokens = 2 EXPECTED_OUTPUT = "A quick brown fox jumps over the lazy dog" device_map = "cuda" # called only once for all test in this class @classmethod def setUpClass(cls): """ Setup quantized model """ quantization_config = HiggsConfig() cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name) cls.quantized_model = AutoModelForCausalLM.from_pretrained( cls.model_name, device_map=cls.device_map, quantization_config=quantization_config ) def tearDown(self): gc.collect() torch.cuda.empty_cache() gc.collect() def test_quantized_model_conversion(self): """ Simple test that checks if the quantized model has been converted properly """ from transformers.integrations import HiggsLinear, replace_with_higgs_linear model_id = "facebook/opt-350m" config = AutoConfig.from_pretrained(model_id, revision="cb32f77e905cccbca1d970436fb0f5e6b58ee3c5") quantization_config = HiggsConfig() with init_empty_weights(): model = OPTForCausalLM(config) nb_linears = 0 for module in model.modules(): if isinstance(module, torch.nn.Linear): nb_linears += 1 model, _ = replace_with_higgs_linear(model, quantization_config=quantization_config) nb_higgs_linear = 0 for module in model.modules(): if isinstance(module, HiggsLinear): nb_higgs_linear += 1 self.assertEqual(nb_linears - 1, nb_higgs_linear) with init_empty_weights(): model = OPTForCausalLM(config) quantization_config = 
HiggsConfig(modules_to_not_convert=["fc1"]) model, _ = replace_with_higgs_linear(model, quantization_config=quantization_config) nb_higgs_linear = 0 for module in model.modules(): if isinstance(module, HiggsLinear): nb_higgs_linear += 1 self.assertEqual(nb_linears - 24, nb_higgs_linear) def test_quantized_model(self): """ Simple test that checks if the quantized model is working properly """ input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) output = self.quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens) self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT) def test_save_pretrained(self): """ Simple test that checks if the quantized model is working properly after being saved and loaded """ with tempfile.TemporaryDirectory() as tmpdirname: self.quantized_model.save_pretrained(tmpdirname) model = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map=self.device_map) input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) output = model.generate(**input_ids, max_new_tokens=self.max_new_tokens) self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT) @require_torch_multi_gpu def test_quantized_model_multi_gpu(self): """ Simple test that checks if the quantized model is working properly with multiple GPUs set CUDA_VISIBLE_DEVICES=0,1 if you have more than 2 GPUS """ input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) quantization_config = HiggsConfig() quantized_model = AutoModelForCausalLM.from_pretrained( self.model_name, device_map="auto", quantization_config=quantization_config ) self.assertTrue(set(quantized_model.hf_device_map.values()) == {0, 1}) output = quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens) self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT) @require_torch_multi_gpu def test_save_pretrained_multi_gpu(self): """ Simple test that checks if the quantized model is working properly after being saved and loaded """ with tempfile.TemporaryDirectory() as tmpdirname: self.quantized_model.save_pretrained(tmpdirname) model = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map="auto") self.assertTrue(set(model.hf_device_map.values()) == {0, 1}) input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) output = model.generate(**input_ids, max_new_tokens=self.max_new_tokens) self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT) @unittest.skip("This will almost surely OOM. Enable when swithed to a smaller model") def test_dequantize(self): """ Test the ability to dequantize a model """ self.quantized_model.dequantize() input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) output = self.quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens) self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
transformers/tests/quantization/higgs/test_higgs.py/0
{ "file_path": "transformers/tests/quantization/higgs/test_higgs.py", "repo_id": "transformers", "token_count": 3092 }
# we define a fixture function below and it will be "used" by
# referencing its name from tests

import os

import pytest
from attr import dataclass


os.environ["AWS_DEFAULT_REGION"] = "us-east-1"  # default region


@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}

    @property
    def metric_definitions(self) -> list:
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
            ]

    @property
    def base_job_name(self) -> str:
        return f"{self.framework}-transformers-test"

    @property
    def test_path(self) -> str:
        return f"./tests/sagemaker/scripts/{self.framework}"

    @property
    def image_uri(self) -> str:
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"


@pytest.fixture(scope="class")
def sm_env(request):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
transformers/tests/sagemaker/conftest.py/0
{ "file_path": "transformers/tests/sagemaker/conftest.py", "repo_id": "transformers", "token_count": 1035 }
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import inspect import json import random import tempfile from typing import List, Tuple import numpy as np import transformers from transformers import is_flax_available, is_torch_available from transformers.cache_utils import DynamicCache from transformers.models.auto import get_values from transformers.testing_utils import CaptureLogger, is_pt_flax_cross_test, require_flax, torch_device from transformers.utils import CONFIG_NAME, GENERATION_CONFIG_NAME, logging from transformers.utils.generic import ModelOutput if is_flax_available(): import os import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict, freeze, unfreeze from flax.serialization import from_bytes from flax.traverse_util import flatten_dict, unflatten_dict from transformers import ( FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, FLAX_MODEL_MAPPING, FlaxAutoModel, FlaxAutoModelForSequenceClassification, FlaxBertModel, ) from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.modeling_flax_utils import FLAX_WEIGHTS_INDEX_NAME, FLAX_WEIGHTS_NAME os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12" # assumed parallelism: 8 if is_torch_available(): import torch def ids_tensor(shape, vocab_size, rng=None): """Creates a random int32 tensor of the shape within the vocab size.""" if rng is None: rng = random.Random() total_dims = 1 for dim in shape: total_dims *= dim values = [] for _ in range(total_dims): values.append(rng.randint(0, vocab_size - 1)) output = np.array(values, dtype=jnp.int32).reshape(shape) return output def floats_tensor(shape, scale=1.0, rng=None, name=None): """Creates a random float32 tensor""" if rng is None: rng = random.Random() total_dims = 1 for dim in shape: total_dims *= dim values = [] for _ in range(total_dims): values.append(rng.random() * scale) return np.array(values, dtype=jnp.float32).reshape(shape) def random_attention_mask(shape, rng=None): attn_mask = ids_tensor(shape, vocab_size=2, rng=rng) # make sure that at least one token is attended to for each batch attn_mask[:, -1] = 1 return attn_mask def get_params(params, from_head_prefix=None): """Function extracts relevant parameters into flatten dict from model params, appends batch normalization statistics if present""" # If Both parameters and batch normalization statistics are present if "batch_stats" in params: # Extract only parameters for the specified head prefix (if specified) and add batch statistics if from_head_prefix is not None: extracted_params = flatten_dict(unfreeze(params["params"][from_head_prefix])) extracted_params.update(flatten_dict(params["batch_stats"][from_head_prefix])) else: extracted_params = flatten_dict(unfreeze(params["params"])) extracted_params.update(flatten_dict(params["batch_stats"])) # Only parameters are present else: if from_head_prefix is not None: 
extracted_params = flatten_dict(unfreeze(params[from_head_prefix])) else: extracted_params = flatten_dict(unfreeze(params)) return extracted_params @require_flax class FlaxModelTesterMixin: model_tester = None all_model_classes = () test_mismatched_shapes = True is_encoder_decoder = False test_head_masking = False has_attentions = True def _prepare_for_class(self, inputs_dict, model_class): inputs_dict = copy.deepcopy(inputs_dict) # hack for now until we have AutoModel classes if "ForMultipleChoice" in model_class.__name__: inputs_dict = { k: jnp.broadcast_to(v[:, None], (v.shape[0], self.model_tester.num_choices, v.shape[-1])) if isinstance(v, (jnp.ndarray, np.ndarray)) and k != "indices_prng_key" else v for k, v in inputs_dict.items() } return inputs_dict def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float): diff = np.abs((a - b)).max() self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).") def test_model_outputs_equivalence(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}): tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs) dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple() def recursive_check(tuple_object, dict_object): if isinstance(tuple_object, (List, Tuple)): for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object): recursive_check(tuple_iterable_value, dict_iterable_value) elif tuple_object is None: return else: self.assert_almost_equals(jnp.nan_to_num(tuple_object), jnp.nan_to_num(dict_object), 1e-5) recursive_check(tuple_output, dict_output) for model_class in self.all_model_classes: model = model_class(config) tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) # (Copied from tests.test_modeling_common.ModelTesterMixin.check_pt_flax_outputs) def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-4, name="outputs", attributes=None): """ Args: model_class: The class of the model that is currently testing. For example, ..., etc. Currently unused, but it could make debugging easier and faster. names: A string, or a list of strings. These specify what fx_outputs/pt_outputs represent in the model outputs. Currently unused, but in the future, we could use this information to make the error message clearer by giving the name(s) of the output tensor(s) with large difference(s) between PT and Flax. """ self.assertEqual(type(name), str) if attributes is not None: self.assertEqual(type(attributes), tuple, f"{name}: The argument `attributes` should be a `tuple`") # Allow `ModelOutput` (e.g. `CLIPOutput` has `text_model_output` and `vision_model_output`). 
if isinstance(fx_outputs, ModelOutput): self.assertTrue( isinstance(pt_outputs, ModelOutput), f"{name}: `pt_outputs` should an instance of `ModelOutput` when `fx_outputs` is", ) fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None]) self.assertEqual(fx_keys, pt_keys, f"{name}: Output keys differ between Flax and PyTorch") # convert to the case of `tuple` # appending each key to the current (string) `name` attributes = tuple([f"{name}.{k}" for k in fx_keys]) self.check_pt_flax_outputs( fx_outputs.to_tuple(), pt_outputs.to_tuple(), model_class, tol=tol, name=name, attributes=attributes ) # Allow `list` (e.g. `TransfoXLModelOutput.mems` is a list of tensors.) elif type(fx_outputs) in [tuple, list]: self.assertEqual( type(fx_outputs), type(pt_outputs), f"{name}: Output types differ between Flax and PyTorch" ) self.assertEqual( len(fx_outputs), len(pt_outputs), f"{name}: Output lengths differ between Flax and PyTorch" ) if attributes is not None: # case 1: each output has assigned name (e.g. a tuple form of a `ModelOutput`) self.assertEqual( len(attributes), len(fx_outputs), f"{name}: The tuple `attributes` should have the same length as `fx_outputs`", ) else: # case 2: each output has no assigned name (e.g. hidden states of each layer) -> add an index to `name` attributes = tuple([f"{name}_{idx}" for idx in range(len(fx_outputs))]) for fx_output, pt_output, attr in zip(fx_outputs, pt_outputs, attributes): if isinstance(pt_output, DynamicCache): pt_output = pt_output.to_legacy_cache() self.check_pt_flax_outputs(fx_output, pt_output, model_class, tol=tol, name=attr) elif isinstance(fx_outputs, jnp.ndarray): self.assertTrue( isinstance(pt_outputs, torch.Tensor), f"{name}: `pt_outputs` should a tensor when `fx_outputs` is" ) # Using `np.asarray` gives `ValueError: assignment destination is read-only` at the line `fx_outputs[fx_nans] = 0`. fx_outputs = np.array(fx_outputs) pt_outputs = pt_outputs.detach().to("cpu").numpy() self.assertEqual( fx_outputs.shape, pt_outputs.shape, f"{name}: Output shapes differ between Flax and PyTorch" ) # deal with NumPy's scalars to make replacing nan values by 0 work. if np.isscalar(fx_outputs): fx_outputs = np.array([fx_outputs]) pt_outputs = np.array([pt_outputs]) fx_nans = np.isnan(fx_outputs) pt_nans = np.isnan(pt_outputs) pt_outputs[fx_nans] = 0 fx_outputs[fx_nans] = 0 pt_outputs[pt_nans] = 0 fx_outputs[pt_nans] = 0 max_diff = np.amax(np.abs(fx_outputs - pt_outputs)) self.assertLessEqual( max_diff, tol, f"{name}: Difference between PyTorch and Flax is {max_diff} (>= {tol})." ) else: raise ValueError( "`fx_outputs` should be an instance of `ModelOutput`, a `tuple`, or an instance of `jnp.ndarray`. Got" f" {type(fx_outputs)} instead." ) @is_pt_flax_cross_test def test_equivalence_pt_to_flax(self): # It might be better to put this inside the for loop below (because we modify the config there). # But logically, it is fine. 
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): # Output all for aggressive testing config.output_hidden_states = True config.output_attentions = self.has_attentions # prepare inputs prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) pt_inputs = {k: torch.tensor(v.tolist(), device=torch_device) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class pt_model_class_name = model_class.__name__[4:] # Skip the "Flax" at the beginning pt_model_class = getattr(transformers, pt_model_class_name) pt_model = pt_model_class(config).eval() # Flax models don't use the `use_cache` option and cache is not returned as a default. # So we disable `use_cache` here for PyTorch model. pt_model.config.use_cache = False fx_model = model_class(config, dtype=jnp.float32) fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model) fx_model.params = fx_state # send pytorch model to the correct device pt_model.to(torch_device) with torch.no_grad(): pt_outputs = pt_model(**pt_inputs) fx_outputs = fx_model(**prepared_inputs_dict) fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs, pt_outputs, model_class) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True) fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict) fx_keys = tuple([k for k, v in fx_outputs_loaded.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs_loaded, pt_outputs, model_class) @is_pt_flax_cross_test def test_equivalence_flax_to_pt(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): # Output all for aggressive testing config.output_hidden_states = True config.output_attentions = self.has_attentions # prepare inputs prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) pt_inputs = {k: torch.tensor(v.tolist(), device=torch_device) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class pt_model_class_name = model_class.__name__[4:] # Skip the "Flax" at the beginning pt_model_class = getattr(transformers, pt_model_class_name) pt_model = pt_model_class(config).eval() # Flax models don't use the `use_cache` option and cache is not returned as a default. # So we disable `use_cache` here for PyTorch model. 
pt_model.config.use_cache = False fx_model = model_class(config, dtype=jnp.float32) pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params) # make sure weights are tied in PyTorch pt_model.tie_weights() # send pytorch model to the correct device pt_model.to(torch_device) with torch.no_grad(): pt_outputs = pt_model(**pt_inputs) fx_outputs = fx_model(**prepared_inputs_dict) fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs, pt_outputs, model_class) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(tmpdirname) pt_model_loaded = pt_model_class.from_pretrained( tmpdirname, from_flax=True, attn_implementation=fx_model.config._attn_implementation ) # send pytorch model to the correct device pt_model_loaded.to(torch_device) pt_model_loaded.eval() with torch.no_grad(): pt_outputs_loaded = pt_model_loaded(**pt_inputs) fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs_loaded.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs, pt_outputs_loaded, model_class) def test_from_pretrained_save_pretrained(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): model = model_class(config) prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) outputs = model(**prepared_inputs_dict).to_tuple() # verify that normal save_pretrained works as expected with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) # the config file (and the generation config file, if it can generate) should be saved self.assertTrue(os.path.exists(os.path.join(tmpdirname, CONFIG_NAME))) self.assertEqual( model.can_generate(), os.path.exists(os.path.join(tmpdirname, GENERATION_CONFIG_NAME)) ) model_loaded = model_class.from_pretrained(tmpdirname) outputs_loaded = model_loaded(**prepared_inputs_dict).to_tuple() for output_loaded, output in zip(outputs_loaded, outputs): self.assert_almost_equals(output_loaded, output, 1e-3) # verify that save_pretrained for distributed training # with `params=params` works as expected with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, params=model.params) model_loaded = model_class.from_pretrained(tmpdirname) outputs_loaded = model_loaded(**prepared_inputs_dict).to_tuple() for output_loaded, output in zip(outputs_loaded, outputs): self.assert_almost_equals(output_loaded, output, 1e-3) def test_save_load_from_base(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = base_class(config) base_params = get_params(model.params) # check that all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) head_model = model_class.from_pretrained(tmpdirname) base_param_from_head = get_params(head_model.params, from_head_prefix=head_model.base_model_prefix) for key in base_param_from_head.keys(): max_diff = (base_params[key] - base_param_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") def test_save_load_to_base(self): config, _ = 
self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = model_class(config) base_params_from_head = get_params(model.params, from_head_prefix=model.base_model_prefix) # check that all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) base_model = base_class.from_pretrained(tmpdirname) base_params = get_params(base_model.params) for key in base_params_from_head.keys(): max_diff = (base_params[key] - base_params_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") @is_pt_flax_cross_test def test_save_load_from_base_pt(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = base_class(config) base_params = get_params(model.params) # convert Flax model to PyTorch model pt_model_class = getattr(transformers, base_class.__name__[4:]) # Skip the "Flax" at the beginning pt_model = pt_model_class(config).eval() pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params) # check that all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: # save pt model pt_model.save_pretrained(tmpdirname) head_model = model_class.from_pretrained(tmpdirname, from_pt=True) base_param_from_head = get_params(head_model.params, from_head_prefix=head_model.base_model_prefix) for key in base_param_from_head.keys(): max_diff = (base_params[key] - base_param_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") @is_pt_flax_cross_test def test_save_load_to_base_pt(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = model_class(config) base_params_from_head = get_params(model.params, from_head_prefix=model.base_model_prefix) # convert Flax model to PyTorch model pt_model_class = getattr(transformers, model_class.__name__[4:]) # Skip the "Flax" at the beginning pt_model = pt_model_class(config).eval() pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params) # check that all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) base_model = base_class.from_pretrained(tmpdirname, from_pt=True) base_params = get_params(base_model.params) for key in base_params_from_head.keys(): max_diff = (base_params[key] - base_params_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") @is_pt_flax_cross_test def test_save_load_bf16_to_base_pt(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = model_class(config) model.params = model.to_bf16(model.params) base_params_from_head = get_params(model.params, from_head_prefix=model.base_model_prefix) # convert Flax model to PyTorch model pt_model_class = getattr(transformers, model_class.__name__[4:]) # Skip the "Flax" at the beginning pt_model = pt_model_class(config).eval() pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params) # check that 
all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) base_model = base_class.from_pretrained(tmpdirname, from_pt=True) base_params = get_params(base_model.params) for key in base_params_from_head.keys(): max_diff = (base_params[key] - base_params_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") def test_jit_compilation(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) @jax.jit def model_jitted(input_ids, attention_mask=None, **kwargs): return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs) with self.subTest("JIT Enabled"): jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple() with self.subTest("JIT Disabled"): with jax.disable_jit(): outputs = model_jitted(**prepared_inputs_dict).to_tuple() self.assertEqual(len(outputs), len(jitted_outputs)) for jitted_output, output in zip(jitted_outputs, outputs): self.assertEqual(jitted_output.shape, output.shape) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.__call__) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] if model.config.is_encoder_decoder: expected_arg_names = [ "input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", ] self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) else: expected_arg_names = ["input_ids", "attention_mask"] self.assertListEqual(arg_names[:2], expected_arg_names) def test_naming_convention(self): for model_class in self.all_model_classes: model_class_name = model_class.__name__ module_class_name = ( model_class_name[:-5] + "Module" if model_class_name[-5:] == "Model" else model_class_name + "Module" ) bert_modeling_flax_module = __import__(model_class.__module__, fromlist=[module_class_name]) module_cls = getattr(bert_modeling_flax_module, module_class_name) self.assertIsNotNone(module_cls) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) if hasattr(self.model_tester, "encoder_seq_length"): seq_length = self.model_tester.encoder_seq_length else: seq_length = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], ) if config.is_encoder_decoder: hidden_states = outputs.decoder_hidden_states self.assertIsInstance(hidden_states, (list, tuple)) self.assertEqual(len(hidden_states), expected_num_layers) seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) self.assertListEqual( list(hidden_states[0].shape[-2:]), [decoder_seq_length, self.model_tester.hidden_size], ) config, inputs_dict = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_attention_outputs(self): if not self.has_attentions: self.skipTest(reason="Model does not output attentions") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_length = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_length) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_length) decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) out_len = len(outputs) if self.is_encoder_decoder: correct_outlen = 5 # Question Answering model returns start_logits and end_logits if model_class in get_values(FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING): correct_outlen += 1 # start_logits and end_logits instead of only 1 output self.assertEqual(out_len, correct_outlen) # decoder attentions decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) # cross attentions cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, encoder_key_length, ], ) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types elif self.is_encoder_decoder: added_hidden_states = 2 else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions 
self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) def test_load_with_mismatched_shapes(self): if not self.test_mismatched_shapes: return config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if model_class not in get_values(FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING): continue with self.subTest(msg=f"Testing {model_class}"): with tempfile.TemporaryDirectory() as tmp_dir: model = model_class(config) model.save_pretrained(tmp_dir) # Fails when we don't set ignore_mismatched_sizes=True with self.assertRaises(ValueError): new_model = FlaxAutoModelForSequenceClassification.from_pretrained(tmp_dir, num_labels=42) with self.assertRaises(ValueError): new_model_without_prefix = FlaxAutoModel.from_pretrained(tmp_dir, vocab_size=10) logger = logging.get_logger("transformers.modeling_flax_utils") with CaptureLogger(logger) as cl: new_model = FlaxAutoModelForSequenceClassification.from_pretrained( tmp_dir, num_labels=42, ignore_mismatched_sizes=True ) self.assertIn("the shapes did not match", cl.out) logits = new_model(**inputs_dict)["logits"] self.assertEqual(logits.shape[1], 42) with CaptureLogger(logger) as cl: new_model_without_prefix = FlaxAutoModel.from_pretrained( tmp_dir, vocab_size=10, ignore_mismatched_sizes=True ) self.assertIn("the shapes did not match", cl.out) input_ids = ids_tensor((2, 8), 10) if self.is_encoder_decoder: new_model_without_prefix(input_ids, decoder_input_ids=input_ids) else: new_model_without_prefix(input_ids) def test_default_params_dtype(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # check if all params are still in float32 when dtype of computation is half-precision model = model_class(config, dtype=jnp.float16) types = jax.tree_util.tree_map(lambda x: x.dtype, model.params) types = flatten_dict(types) for name, type_ in types.items(): self.assertEqual(type_, jnp.float32, msg=f"param {name} is not initialized in fp32.") def test_to_bf16(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) # cast all params to bf16 params = model.to_bf16(model.params) types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, params)) # test if all params are in bf16 for name, type_ in types.items(): self.assertEqual(type_, jnp.bfloat16, msg=f"param {name} is not in bf16.") # test masking flat_params = flatten_dict(params) key = random.choice(list(flat_params.keys())) # choose a random param mask = {path: path != key for path in flat_params} # don't cast the key mask = unflatten_dict(mask) params = model.to_bf16(model.params, mask) types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, params)) # test if all params are in bf16 except key for name, type_ in types.items(): if name == key: self.assertEqual(type_, jnp.float32, msg=f"param {name} should be in fp32.") else: self.assertEqual(type_, jnp.bfloat16, msg=f"param {name} is not in bf16.") def test_to_fp16(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) # cast all params to fp16 params = model.to_fp16(model.params) types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, params)) # test if all params are in fp16 for 
name, type_ in types.items(): self.assertEqual(type_, jnp.float16, msg=f"param {name} is not in fp16.") # test masking flat_params = flatten_dict(params) key = random.choice(list(flat_params.keys())) # choose a random param mask = {path: path != key for path in flat_params} # don't cast the key mask = unflatten_dict(mask) params = model.to_fp16(model.params, mask) types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, params)) # test if all params are in fp16 except key for name, type_ in types.items(): if name == key: self.assertEqual(type_, jnp.float32, msg=f"param {name} should be in fp32.") else: self.assertEqual(type_, jnp.float16, msg=f"param {name} is not in fp16.") def test_to_fp32(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) # cast all params to fp16 and back to fp32 params = model.to_fp16(model.params) params = model.to_fp32(params) # test if all params are in fp32 types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, params)) for name, type_ in types.items(): self.assertEqual(type_, jnp.float32, msg=f"param {name} is not in fp32.") # test masking flat_params = flatten_dict(params) key = random.choice(list(flat_params.keys())) # choose a random param mask = {path: path != key for path in flat_params} # don't cast the key mask = unflatten_dict(mask) # cast to fp16 and back to fp32 with mask params = model.to_fp16(model.params) params = model.to_fp32(params, mask) # test if all params are in fp32 except key types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, params)) for name, type_ in types.items(): if name == key: self.assertEqual(type_, jnp.float16, msg=f"param {name} should be in fp16.") else: self.assertEqual(type_, jnp.float32, msg=f"param {name} is not in fp32.") def test_save_load_in_fp16(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) # convert weights to fp16 and save params = model.to_fp16(model.params) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, params=params) # load the weights again and check if they are still in fp16 model = model_class.from_pretrained(tmpdirname) types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, model.params)) for name, type_ in types.items(): self.assertEqual(type_, jnp.float16, msg=f"param {name} is not in fp16.") def test_save_load_in_bf16(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) # convert weights to bf16 and save params = model.to_bf16(model.params) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, params=params) # load the weights again and check if they are still in bf16 model = model_class.from_pretrained(tmpdirname) types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, model.params)) for name, type_ in types.items(): self.assertEqual(type_, jnp.bfloat16, msg=f"param {name} is not in bf16.") def test_model_main_input_name(self): for model_class in self.all_model_classes: model_signature = inspect.signature(getattr(model_class, "__call__")) # The main input is the name of the argument after `self` observed_main_input_name = list(model_signature.parameters.keys())[1] self.assertEqual(model_class.main_input_name, observed_main_input_name) def test_headmasking(self): if not self.test_head_masking: return config, inputs_dict 
= self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True def _prepare_layer_head_mask(i, attention_heads, num_hidden_layers): if i == 0: return np.concatenate([np.zeros(1, dtype=jnp.int32), np.ones(attention_heads - 1, dtype=jnp.int32)]) if i == num_hidden_layers - 1: return np.concatenate([np.zeros(attention_heads - 1, dtype=jnp.int32), np.ones(1, dtype=jnp.int32)]) return np.ones(attention_heads, dtype=jnp.int32) for model_class in self.all_model_classes: model = model_class(config) inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False inputs = self._prepare_for_class(inputs_dict, model_class).copy() # Prepare head mask inputs["head_mask"] = np.stack( [ _prepare_layer_head_mask(i, config.num_attention_heads, config.num_hidden_layers) for i in range(config.num_hidden_layers) ] ) outputs = model(**inputs) def _check_attentions_validity(attentions): # Remove NaN for t in attentions: # Check we don't have more than 25% nans (arbitrary) self.assertLess(np.isnan(t).sum(), t.size / 4) attentions = [np.where(np.isnan(t), 0.0, t) for t in attentions] self.assertAlmostEqual(attentions[0][..., 0, :, :].sum(), 0.0) self.assertNotEqual(attentions[0][..., -1, :, :].sum(), 0.0) if len(attentions) > 2: # encoder-decoder models have only 2 layers in each module self.assertNotEqual(attentions[1][..., 0, :, :].sum(), 0.0) self.assertAlmostEqual(attentions[-1][..., -2, :, :].sum(), 0.0) self.assertNotEqual(attentions[-1][..., -1, :, :].sum(), 0.0) if model.config.is_encoder_decoder: raise NotImplementedError("The test has not been implemented for encoder-decoder models yet.") else: _check_attentions_validity(outputs.attentions) def test_no_automatic_init(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: model = model_class(config, _do_init=False) # Check that accessing params raises a ValueError when _do_init is False with self.assertRaises(ValueError): params = model.params # Check if params can be properly initialized when calling init_weights params = model.init_weights(model.key, model.input_shape) assert isinstance(params, (dict, FrozenDict)), f"params are not an instance of {FrozenDict}" # Check if all required params are initialized keys = set(flatten_dict(unfreeze(params)).keys()) self.assertTrue(all(k in keys for k in model.required_params)) # Check if the shapes match flat_params = flatten_dict(unfreeze(params)) for k, v in flatten_dict(unfreeze(model.params_shape_tree)).items(): self.assertEqual( v.shape, flat_params[k].shape, "Shapes of {} do not match. 
Expecting {}, got {}.".format(k, v.shape, flat_params[k].shape), ) # Check that setting params raises a ValueError when _do_init is False with self.assertRaises(ValueError): model.params = params # Check if we can do a forward pass inputs_dict["output_hidden_states"] = True inputs = self._prepare_for_class(inputs_dict, model_class).copy() model(**inputs, params=params) def test_from_pretrained_with_no_automatic_init(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True def _assert_all_params_initialised(model, params): # Check if all required params are loaded keys = set(flatten_dict(unfreeze(params)).keys()) self.assertTrue(all(k in keys for k in model.required_params)) # Check if the shapes match flat_params = flatten_dict(unfreeze(params)) for k, v in flatten_dict(unfreeze(model.params_shape_tree)).items(): self.assertEqual( v.shape, flat_params[k].shape, "Shapes of {} do not match. Expecting {}, got {}.".format(k, v.shape, flat_params[k].shape), ) for model_class in self.all_model_classes: # init the model model = model_class(config) # save the model in the temporary directory # load the saved model with _do_init=False with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model, params = model_class.from_pretrained(tmpdirname, _do_init=False) # Check that accessing params raises a ValueError when _do_init is False with self.assertRaises(ValueError): params = model.params # Check if all required params are loaded _assert_all_params_initialised(model, params) # Check that setting params raises a ValueError when _do_init is False with self.assertRaises(ValueError): model.params = params # Check if init_weights initializes missing keys from from_pretrained flat_params = flatten_dict(unfreeze(params)) random_key = random.choice(list(flat_params.keys())) flat_params.pop(random_key) params = freeze(unflatten_dict(flat_params)) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, params=params) model, params = model_class.from_pretrained(tmpdirname, _do_init=False) params = model.init_weights(model.key, model.input_shape, params=params) # Check if all required params are loaded _assert_all_params_initialised(model, params) def test_checkpoint_sharding_from_hub(self): model = FlaxBertModel.from_pretrained("ArthurZ/flax-tiny-random-bert-sharded") # the model above is the same as the model below, just a sharded version. ref_model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-flax-only") for p1, p2 in zip(flatten_dict(model.params).values(), flatten_dict(ref_model.params).values()): assert np.allclose(np.array(p1), np.array(p2)) def test_checkpoint_sharding_local(self): model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-flax-only") with tempfile.TemporaryDirectory() as tmp_dir: # We use the same folder for various sizes to make sure a new save erases the old checkpoint. 
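# Note: "kB" suffixes below are decimal kilobytes (10**3 bytes) while "kiB" suffixes are binary kibibytes (2**10 bytes); the size check further down converts each suffix accordingly before comparing shard sizes.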
for max_size in ["150kB", "150kiB", "200kB", "200kiB"]: model.save_pretrained(tmp_dir, max_shard_size=max_size) # Get each shard file and its size shard_to_size = {} for shard in os.listdir(tmp_dir): if shard.endswith(".msgpack"): shard_file = os.path.join(tmp_dir, shard) shard_to_size[shard_file] = os.path.getsize(shard_file) index_file = os.path.join(tmp_dir, FLAX_WEIGHTS_INDEX_NAME) # Check there is an index but no regular weight file self.assertTrue(os.path.isfile(index_file)) self.assertFalse(os.path.isfile(os.path.join(tmp_dir, FLAX_WEIGHTS_NAME))) # Check a file is bigger than max_size only when it has a single weight for shard_file, size in shard_to_size.items(): if max_size.endswith("kiB"): max_size_int = int(max_size[:-3]) * 2**10 else: max_size_int = int(max_size[:-2]) * 10**3 # Note: pickle adds some junk so the weight of the file can end up being slightly bigger than # the size asked for (since we count parameters) if size >= max_size_int + 50000: with open(shard_file, "rb") as state_f: state_file = from_bytes(FlaxBertModel, state_f.read()) self.assertEqual(len(state_file), 1) # Check the index and the shard files found match with open(index_file, "r", encoding="utf-8") as f: index = json.loads(f.read()) all_shards = set(index["weight_map"].values()) shards_found = {f for f in os.listdir(tmp_dir) if f.endswith(".msgpack")} self.assertSetEqual(all_shards, shards_found) # Finally, check the model can be reloaded new_model = FlaxBertModel.from_pretrained(tmp_dir) for p1, p2 in zip(flatten_dict(model.params).values(), flatten_dict(new_model.params).values()): self.assertTrue(np.allclose(np.array(p1), np.array(p2))) @is_pt_flax_cross_test def test_from_sharded_pt(self): model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-random-bert-sharded", from_pt=True) ref_model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-random-bert-fx-only") for key, ref_val in flatten_dict(ref_model.params).items(): val = flatten_dict(model.params)[key] assert np.allclose(np.array(val), np.array(ref_val)) def test_gradient_checkpointing(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # prepare inputs prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) remat_model = model_class(config) try: remat_model.enable_gradient_checkpointing() except NotImplementedError: continue outputs = model(**prepared_inputs_dict) remat_outputs = remat_model(**prepared_inputs_dict) # ensure that the dicts of outputs contain the same keys self.assertEqual(outputs.keys(), remat_outputs.keys()) outputs = outputs.to_tuple() remat_outputs = remat_outputs.to_tuple() # ensure that the outputs remain precisely equal for output, remat_output in zip(outputs, remat_outputs): self.assertTrue((output == remat_output).all())
transformers/tests/test_modeling_flax_common.py/0
{ "file_path": "transformers/tests/test_modeling_flax_common.py", "repo_id": "transformers", "token_count": 25289 }
# coding=utf-8 # Copyright 2020 the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from transformers import ( AutoModelForSeq2SeqLM, BertTokenizer, DataCollatorForSeq2Seq, EncoderDecoderModel, GenerationConfig, Seq2SeqTrainer, Seq2SeqTrainingArguments, T5Tokenizer, ) from transformers.testing_utils import TestCasePlus, require_sentencepiece, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets @require_sentencepiece class Seq2seqTrainerTester(TestCasePlus): @slow @require_torch def test_finetune_bert2bert(self): bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny") tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased") bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size bert2bert.config.eos_token_id = tokenizer.sep_token_id bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id bert2bert.config.max_length = 128 train_dataset = datasets.load_dataset("abisee/cnn_dailymail", "3.0.0", split="train[:1%]") val_dataset = datasets.load_dataset("abisee/cnn_dailymail", "3.0.0", split="validation[:1%]") train_dataset = train_dataset.select(range(32)) val_dataset = val_dataset.select(range(16)) batch_size = 4 def _map_to_encoder_decoder_inputs(batch): # Tokenizer will automatically set [BOS] <text> [EOS] inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512) outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128) batch["input_ids"] = inputs.input_ids batch["attention_mask"] = inputs.attention_mask batch["decoder_input_ids"] = outputs.input_ids batch["labels"] = outputs.input_ids.copy() batch["labels"] = [ [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"] ] batch["decoder_attention_mask"] = outputs.attention_mask assert all(len(x) == 512 for x in inputs.input_ids) assert all(len(x) == 128 for x in outputs.input_ids) return batch def _compute_metrics(pred): labels_ids = pred.label_ids pred_ids = pred.predictions # all unnecessary tokens are removed pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True) label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True) accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str) return {"accuracy": accuracy} # map train dataset train_dataset = train_dataset.map( _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"], ) train_dataset.set_format( type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"], ) # same for validation dataset val_dataset = val_dataset.map( _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"], ) val_dataset.set_format( type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", 
"decoder_attention_mask", "labels"], ) output_dir = self.get_auto_remove_tmp_dir() training_args = Seq2SeqTrainingArguments( output_dir=output_dir, per_device_train_batch_size=batch_size, per_device_eval_batch_size=batch_size, predict_with_generate=True, eval_strategy="steps", do_train=True, do_eval=True, warmup_steps=0, eval_steps=2, logging_steps=2, report_to="none", ) # instantiate trainer trainer = Seq2SeqTrainer( model=bert2bert, args=training_args, compute_metrics=_compute_metrics, train_dataset=train_dataset, eval_dataset=val_dataset, processing_class=tokenizer, ) # start training trainer.train() @slow @require_torch def test_return_sequences(self): # Tests that the number of generated sequences is correct when num_return_sequences > 1 # and essentially ensuring that `accelerator.gather()` is used instead of `gather_for_metrics` INPUT_COLUMN = "question" TARGET_COLUMN = "answer" MAX_INPUT_LENGTH = 256 MAX_TARGET_LENGTH = 256 dataset = datasets.load_dataset("openai/gsm8k", "main", split="train[:38]") model = AutoModelForSeq2SeqLM.from_pretrained("google-t5/t5-small") tokenizer = T5Tokenizer.from_pretrained("google-t5/t5-small") data_collator = DataCollatorForSeq2Seq(tokenizer, model=model, return_tensors="pt", padding="longest") gen_config = GenerationConfig.from_pretrained( "google-t5/t5-small", max_length=None, min_length=None, max_new_tokens=256, min_new_tokens=1, num_beams=5 ) training_args = Seq2SeqTrainingArguments(".", predict_with_generate=True, report_to="none") trainer = Seq2SeqTrainer( model=model, args=training_args, processing_class=tokenizer, data_collator=data_collator, compute_metrics=lambda x: {"samples": x[0].shape[0]}, ) def prepare_data(examples): # Remove pairs where at least one record is none inputs = examples[INPUT_COLUMN] targets = examples[TARGET_COLUMN] model_inputs = tokenizer(inputs, max_length=MAX_INPUT_LENGTH, truncation=True) labels = tokenizer(text_target=targets, max_length=MAX_TARGET_LENGTH, truncation=True) model_inputs["labels"] = labels["input_ids"] return model_inputs prepared_dataset = dataset.map(prepare_data, batched=True, remove_columns=[INPUT_COLUMN, TARGET_COLUMN]) dataset_len = len(prepared_dataset) # 38 for num_return_sequences in range(3, 0, -1): gen_config.num_return_sequences = num_return_sequences metrics = trainer.evaluate(eval_dataset=prepared_dataset, generation_config=gen_config) assert ( metrics["eval_samples"] == dataset_len * num_return_sequences ), f"Got {metrics['eval_samples']}, expected: {dataset_len * num_return_sequences}" @require_torch def test_bad_generation_config_fail_early(self): # Tests that a bad geneartion config causes the trainer to fail early model = AutoModelForSeq2SeqLM.from_pretrained("google-t5/t5-small") tokenizer = T5Tokenizer.from_pretrained("google-t5/t5-small") data_collator = DataCollatorForSeq2Seq(tokenizer, model=model, return_tensors="pt", padding="longest") gen_config = GenerationConfig(do_sample=False, top_p=0.9) # bad: top_p is not compatible with do_sample=False training_args = Seq2SeqTrainingArguments( ".", predict_with_generate=True, generation_config=gen_config, report_to="none" ) with self.assertRaises(ValueError) as exc: _ = Seq2SeqTrainer( model=model, args=training_args, processing_class=tokenizer, data_collator=data_collator, compute_metrics=lambda x: {"samples": x[0].shape[0]}, ) self.assertIn("The loaded generation config instance is invalid", str(exc.exception))
transformers/tests/trainer/test_trainer_seq2seq.py/0
{ "file_path": "transformers/tests/trainer/test_trainer_seq2seq.py", "repo_id": "transformers", "token_count": 3785 }
# coding=utf-8 # Copyright 2019 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import shutil import sys import tempfile import unittest import unittest.mock as mock import warnings from pathlib import Path from huggingface_hub import HfFolder from requests.exceptions import HTTPError from transformers import AutoConfig, BertConfig, GPT2Config from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import TOKEN, TemporaryHubRepo, is_staging_test sys.path.append(str(Path(__file__).parent.parent.parent / "utils")) from test_module.custom_configuration import CustomConfig # noqa E402 config_common_kwargs = { "return_dict": False, "output_hidden_states": True, "output_attentions": True, "torchscript": True, "torch_dtype": "float16", "use_bfloat16": True, "tf_legacy_loss": True, "pruned_heads": {"a": 1}, "tie_word_embeddings": False, "is_decoder": True, "cross_attention_hidden_size": 128, "add_cross_attention": True, "tie_encoder_decoder": True, "max_length": 50, "min_length": 3, "do_sample": True, "early_stopping": True, "num_beams": 3, "num_beam_groups": 3, "diversity_penalty": 0.5, "temperature": 2.0, "top_k": 10, "top_p": 0.7, "typical_p": 0.2, "repetition_penalty": 0.8, "length_penalty": 0.8, "no_repeat_ngram_size": 5, "encoder_no_repeat_ngram_size": 5, "bad_words_ids": [1, 2, 3], "num_return_sequences": 3, "chunk_size_feed_forward": 5, "output_scores": True, "return_dict_in_generate": True, "forced_bos_token_id": 2, "forced_eos_token_id": 3, "remove_invalid_values": True, "architectures": ["BertModel"], "finetuning_task": "translation", "id2label": {0: "label"}, "label2id": {"label": "0"}, "tokenizer_class": "BertTokenizerFast", "prefix": "prefix", "bos_token_id": 6, "pad_token_id": 7, "eos_token_id": 8, "sep_token_id": 9, "decoder_start_token_id": 10, "exponential_decay_length_penalty": (5, 1.01), "suppress_tokens": [0, 1], "begin_suppress_tokens": 2, "task_specific_params": {"translation": "some_params"}, "problem_type": "regression", } @is_staging_test class ConfigPushToHubTester(unittest.TestCase): @classmethod def setUpClass(cls): cls._token = TOKEN HfFolder.save_token(TOKEN) def test_push_to_hub(self): with TemporaryHubRepo(token=self._token) as tmp_repo: config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) config.push_to_hub(tmp_repo.repo_id, token=self._token) new_config = BertConfig.from_pretrained(tmp_repo.repo_id) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(v, getattr(new_config, k)) def test_push_to_hub_via_save_pretrained(self): with TemporaryHubRepo(token=self._token) as tmp_repo: config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(tmp_dir, repo_id=tmp_repo.repo_id, push_to_hub=True, token=self._token) new_config = 
BertConfig.from_pretrained(tmp_repo.repo_id) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(v, getattr(new_config, k)) def test_push_to_hub_in_organization(self): with TemporaryHubRepo(namespace="valid_org", token=self._token) as tmp_repo: config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) config.push_to_hub(tmp_repo.repo_id, token=self._token) new_config = BertConfig.from_pretrained(tmp_repo.repo_id) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(v, getattr(new_config, k)) def test_push_to_hub_in_organization_via_save_pretrained(self): with TemporaryHubRepo(namespace="valid_org", token=self._token) as tmp_repo: config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(tmp_dir, repo_id=tmp_repo.repo_id, push_to_hub=True, token=self._token) new_config = BertConfig.from_pretrained(tmp_repo.repo_id) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(v, getattr(new_config, k)) def test_push_to_hub_dynamic_config(self): with TemporaryHubRepo(token=self._token) as tmp_repo: CustomConfig.register_for_auto_class() config = CustomConfig(attribute=42) config.push_to_hub(tmp_repo.repo_id, token=self._token) # This has added the proper auto_map field to the config self.assertDictEqual(config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig"}) new_config = AutoConfig.from_pretrained(tmp_repo.repo_id, trust_remote_code=True) # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module self.assertEqual(new_config.__class__.__name__, "CustomConfig") self.assertEqual(new_config.attribute, 42) class ConfigTestUtils(unittest.TestCase): def test_config_from_string(self): c = GPT2Config() # attempt to modify each of int/float/bool/str config records and verify they were updated n_embd = c.n_embd + 1 # int resid_pdrop = c.resid_pdrop + 1.0 # float scale_attn_weights = not c.scale_attn_weights # bool summary_type = c.summary_type + "foo" # str c.update_from_string( f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}" ) self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd") self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop") self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights") self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type") def test_config_common_kwargs_is_complete(self): base_config = PretrainedConfig() missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs] # If this part of the test fails, you have arguments to addin config_common_kwargs above. self.assertListEqual( missing_keys, [ "is_encoder_decoder", "_name_or_path", "_commit_hash", "_attn_implementation_internal", "_attn_implementation_autoset", "transformers_version", ], ) keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)] if len(keys_with_defaults) > 0: raise ValueError( "The following keys are set with the default values in" " `test_configuration_common.config_common_kwargs` pick another value for them:" f" {', '.join(keys_with_defaults)}." 
) def test_nested_config_load_from_dict(self): config = AutoConfig.from_pretrained( "hf-internal-testing/tiny-random-CLIPModel", text_config={"num_hidden_layers": 2} ) self.assertNotIsInstance(config.text_config, dict) self.assertEqual(config.text_config.__class__.__name__, "CLIPTextConfig") def test_from_pretrained_subfolder(self): config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder") self.assertIsNotNone(config) config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder", subfolder="bert") self.assertIsNotNone(config) def test_cached_files_are_used_when_internet_is_down(self): # A mock response for an HTTP head request to emulate server down response_mock = mock.Mock() response_mock.status_code = 500 response_mock.headers = {} response_mock.raise_for_status.side_effect = HTTPError response_mock.json.return_value = {} # Download this model to make sure it's in the cache. _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert") # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch("requests.Session.request", return_value=response_mock) as mock_head: _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert") # This check we did call the fake head request mock_head.assert_called() def test_local_versioning(self): configuration = AutoConfig.from_pretrained("google-bert/bert-base-cased") configuration.configuration_files = ["config.4.0.0.json"] with tempfile.TemporaryDirectory() as tmp_dir: configuration.save_pretrained(tmp_dir) configuration.hidden_size = 2 json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, "config.4.0.0.json"), "w")) # This should pick the new configuration file as the version of Transformers is > 4.0.0 new_configuration = AutoConfig.from_pretrained(tmp_dir) self.assertEqual(new_configuration.hidden_size, 2) # Will need to be adjusted if we reach v42 and this test is still here. # Should pick the old configuration file as the version of Transformers is < 4.42.0 configuration.configuration_files = ["config.42.0.0.json"] configuration.hidden_size = 768 configuration.save_pretrained(tmp_dir) shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json")) new_configuration = AutoConfig.from_pretrained(tmp_dir) self.assertEqual(new_configuration.hidden_size, 768) def test_repo_versioning_before(self): # This repo has two configuration files, one for v4.0.0 and above with a different hidden size. repo = "hf-internal-testing/test-two-configs" import transformers as new_transformers new_transformers.configuration_utils.__version__ = "v4.0.0" new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained( repo, return_unused_kwargs=True ) self.assertEqual(new_configuration.hidden_size, 2) # This checks `_configuration_file` ia not kept in the kwargs by mistake. self.assertDictEqual(kwargs, {}) # Testing an older version by monkey-patching the version in the module it's used. 
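# Note: `old_transformers` below is the same module object as `new_transformers`; only the version string read by `configuration_utils` is monkey-patched to simulate an older release.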
import transformers as old_transformers old_transformers.configuration_utils.__version__ = "v3.0.0" old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo) self.assertEqual(old_configuration.hidden_size, 768) def test_saving_config_with_custom_generation_kwargs_raises_warning(self): config = BertConfig(min_length=3) # `min_length = 3` is a non-default generation kwarg with tempfile.TemporaryDirectory() as tmp_dir: with self.assertWarns(UserWarning) as cm: config.save_pretrained(tmp_dir) self.assertIn("min_length", str(cm.warning)) def test_get_non_default_generation_parameters(self): config = BertConfig() self.assertFalse(len(config._get_non_default_generation_parameters()) > 0) config = BertConfig(min_length=3) self.assertTrue(len(config._get_non_default_generation_parameters()) > 0) config = BertConfig(min_length=0) # `min_length = 0` is a default generation kwarg self.assertFalse(len(config._get_non_default_generation_parameters()) > 0) def test_loading_config_do_not_raise_future_warnings(self): """Regression test for https://github.com/huggingface/transformers/issues/31002.""" # Loading config should not raise a FutureWarning. It was the case before. with warnings.catch_warnings(): warnings.simplefilter("error") PretrainedConfig.from_pretrained("bert-base-uncased")
transformers/tests/utils/test_configuration_utils.py/0
{ "file_path": "transformers/tests/utils/test_configuration_utils.py", "repo_id": "transformers", "token_count": 5685 }
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest import numpy as np from huggingface_hub import HfFolder, snapshot_download from transformers import BertConfig, BertModel, is_flax_available, is_torch_available from transformers.testing_utils import ( TOKEN, CaptureLogger, TemporaryHubRepo, is_pt_flax_cross_test, is_staging_test, require_flax, require_safetensors, require_torch, ) from transformers.utils import FLAX_WEIGHTS_NAME, SAFE_WEIGHTS_NAME, logging if is_flax_available(): import os from flax.core.frozen_dict import unfreeze from flax.traverse_util import flatten_dict from transformers import FlaxBertModel os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12" # assumed parallelism: 8 if is_torch_available(): import torch @require_flax @is_staging_test class FlaxModelPushToHubTester(unittest.TestCase): @classmethod def setUpClass(cls): cls._token = TOKEN HfFolder.save_token(TOKEN) def test_push_to_hub(self): with TemporaryHubRepo(token=self._token) as tmp_repo: config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) model = FlaxBertModel(config) model.push_to_hub(tmp_repo.repo_id, token=self._token) new_model = FlaxBertModel.from_pretrained(tmp_repo.repo_id) base_params = flatten_dict(unfreeze(model.params)) new_params = flatten_dict(unfreeze(new_model.params)) for key in base_params.keys(): max_diff = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") def test_push_to_hub_via_save_pretrained(self): with TemporaryHubRepo(token=self._token) as tmp_repo: config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) model = FlaxBertModel(config) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, repo_id=tmp_repo.repo_id, push_to_hub=True, token=self._token) new_model = FlaxBertModel.from_pretrained(tmp_repo.repo_id) base_params = flatten_dict(unfreeze(model.params)) new_params = flatten_dict(unfreeze(new_model.params)) for key in base_params.keys(): max_diff = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") def test_push_to_hub_in_organization(self): with TemporaryHubRepo(namespace="valid_org", token=self._token) as tmp_repo: config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) model = FlaxBertModel(config) model.push_to_hub(tmp_repo.repo_id, token=self._token) new_model = FlaxBertModel.from_pretrained(tmp_repo.repo_id) base_params = flatten_dict(unfreeze(model.params)) new_params = flatten_dict(unfreeze(new_model.params)) for key in base_params.keys(): max_diff = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") def test_push_to_hub_in_organization_via_save_pretrained(self): 
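# Same flow as the organization test above, but the push happens through `save_pretrained(..., push_to_hub=True)` instead of an explicit `push_to_hub` call.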
with TemporaryHubRepo(namespace="valid_org", token=self._token) as tmp_repo: config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) model = FlaxBertModel(config) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, repo_id=tmp_repo.repo_id, push_to_hub=True, token=self._token) new_model = FlaxBertModel.from_pretrained(tmp_repo.repo_id) base_params = flatten_dict(unfreeze(model.params)) new_params = flatten_dict(unfreeze(new_model.params)) for key in base_params.keys(): max_diff = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") def check_models_equal(model1, model2): models_are_equal = True flat_params_1 = flatten_dict(model1.params) flat_params_2 = flatten_dict(model2.params) for key in flat_params_1.keys(): if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4: models_are_equal = False return models_are_equal @require_flax class FlaxModelUtilsTest(unittest.TestCase): def test_model_from_pretrained_subfolder(self): config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only") model = FlaxBertModel(config) subfolder = "bert" with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(tmp_dir, subfolder)) with self.assertRaises(OSError): _ = FlaxBertModel.from_pretrained(tmp_dir) model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder) self.assertTrue(check_models_equal(model, model_loaded)) def test_model_from_pretrained_subfolder_sharded(self): config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only") model = FlaxBertModel(config) subfolder = "bert" with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB") with self.assertRaises(OSError): _ = FlaxBertModel.from_pretrained(tmp_dir) model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder) self.assertTrue(check_models_equal(model, model_loaded)) def test_model_from_pretrained_hub_subfolder(self): subfolder = "bert" model_id = "hf-internal-testing/tiny-random-bert-subfolder" with self.assertRaises(OSError): _ = FlaxBertModel.from_pretrained(model_id) model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder) self.assertIsNotNone(model) def test_model_from_pretrained_hub_subfolder_sharded(self): subfolder = "bert" model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder" with self.assertRaises(OSError): _ = FlaxBertModel.from_pretrained(model_id) model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder) self.assertIsNotNone(model) @require_safetensors def test_safetensors_save_and_load(self): model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-flax-only") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, safe_serialization=True) # No msgpack file, only a model.safetensors self.assertTrue(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_NAME))) self.assertFalse(os.path.isfile(os.path.join(tmp_dir, FLAX_WEIGHTS_NAME))) new_model = FlaxBertModel.from_pretrained(tmp_dir) self.assertTrue(check_models_equal(model, new_model)) @require_flax @require_torch @is_pt_flax_cross_test def test_safetensors_save_and_load_pt_to_flax(self): model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-random-bert", from_pt=True) pt_model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") with 
tempfile.TemporaryDirectory() as tmp_dir: pt_model.save_pretrained(tmp_dir) # Check we have a model.safetensors file self.assertTrue(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_NAME))) new_model = FlaxBertModel.from_pretrained(tmp_dir) # Check models are equal self.assertTrue(check_models_equal(model, new_model)) @require_safetensors def test_safetensors_load_from_hub(self): """ This test checks that we can load safetensors from a checkpoint that only has those on the Hub """ flax_model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-flax-only") # Can load from the Flax-formatted checkpoint safetensors_model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-flax-safetensors-only") self.assertTrue(check_models_equal(flax_model, safetensors_model)) @require_safetensors def test_safetensors_load_from_local(self): """ This test checks that we can load safetensors from a checkpoint that only has those on the Hub """ with tempfile.TemporaryDirectory() as tmp: location = snapshot_download("hf-internal-testing/tiny-bert-flax-only", cache_dir=tmp) flax_model = FlaxBertModel.from_pretrained(location) with tempfile.TemporaryDirectory() as tmp: location = snapshot_download("hf-internal-testing/tiny-bert-flax-safetensors-only", cache_dir=tmp) safetensors_model = FlaxBertModel.from_pretrained(location) self.assertTrue(check_models_equal(flax_model, safetensors_model)) @require_safetensors @is_pt_flax_cross_test def test_safetensors_load_from_hub_from_safetensors_pt(self): """ This test checks that we can load safetensors from a checkpoint that only has those on the Hub. saved in the "pt" format. """ flax_model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-msgpack") # Can load from the PyTorch-formatted checkpoint safetensors_model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-pt-safetensors") self.assertTrue(check_models_equal(flax_model, safetensors_model)) @require_safetensors @require_torch @is_pt_flax_cross_test def test_safetensors_load_from_hub_from_safetensors_pt_bf16(self): """ This test checks that we can load safetensors from a checkpoint that only has those on the Hub. saved in the "pt" format. """ import torch model = BertModel.from_pretrained("hf-internal-testing/tiny-bert-pt-safetensors") model.to(torch.bfloat16) with tempfile.TemporaryDirectory() as tmp: model.save_pretrained(tmp) flax_model = FlaxBertModel.from_pretrained(tmp) # Can load from the PyTorch-formatted checkpoint safetensors_model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-pt-safetensors-bf16") self.assertTrue(check_models_equal(flax_model, safetensors_model)) @require_safetensors @is_pt_flax_cross_test def test_safetensors_load_from_local_from_safetensors_pt(self): """ This test checks that we can load safetensors from a checkpoint that only has those on the Hub. saved in the "pt" format. 
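The checkpoints are first fetched locally with `snapshot_download` and then loaded from disk rather than directly from the Hub.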
""" with tempfile.TemporaryDirectory() as tmp: location = snapshot_download("hf-internal-testing/tiny-bert-msgpack", cache_dir=tmp) flax_model = FlaxBertModel.from_pretrained(location) # Can load from the PyTorch-formatted checkpoint with tempfile.TemporaryDirectory() as tmp: location = snapshot_download("hf-internal-testing/tiny-bert-pt-safetensors", cache_dir=tmp) safetensors_model = FlaxBertModel.from_pretrained(location) self.assertTrue(check_models_equal(flax_model, safetensors_model)) @require_safetensors def test_safetensors_load_from_hub_msgpack_before_safetensors(self): """ This test checks that we'll first download msgpack weights before safetensors The safetensors file on that repo is a pt safetensors and therefore cannot be loaded without PyTorch """ FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-pt-safetensors-msgpack") @require_safetensors def test_safetensors_load_from_local_msgpack_before_safetensors(self): """ This test checks that we'll first download msgpack weights before safetensors The safetensors file on that repo is a pt safetensors and therefore cannot be loaded without PyTorch """ with tempfile.TemporaryDirectory() as tmp: location = snapshot_download("hf-internal-testing/tiny-bert-pt-safetensors-msgpack", cache_dir=tmp) FlaxBertModel.from_pretrained(location) @require_safetensors def test_safetensors_flax_from_flax(self): model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-flax-only") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, safe_serialization=True) new_model = FlaxBertModel.from_pretrained(tmp_dir) self.assertTrue(check_models_equal(model, new_model)) @require_safetensors @require_torch @is_pt_flax_cross_test def test_safetensors_flax_from_torch(self): hub_model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-flax-only") model = BertModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, safe_serialization=True) new_model = FlaxBertModel.from_pretrained(tmp_dir) self.assertTrue(check_models_equal(hub_model, new_model)) @require_safetensors def test_safetensors_flax_from_sharded_msgpack_with_sharded_safetensors_local(self): with tempfile.TemporaryDirectory() as tmp_dir: path = snapshot_download( "hf-internal-testing/tiny-bert-flax-safetensors-msgpack-sharded", cache_dir=tmp_dir ) # This should not raise even if there are two types of sharded weights FlaxBertModel.from_pretrained(path) @require_safetensors def test_safetensors_flax_from_sharded_msgpack_with_sharded_safetensors_hub(self): # This should not raise even if there are two types of sharded weights # This should discard the safetensors weights in favor of the msgpack sharded weights FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-flax-safetensors-msgpack-sharded") @require_safetensors def test_safetensors_from_pt_bf16(self): # This should not raise; should be able to load bf16-serialized torch safetensors without issue # and without torch. 
logger = logging.get_logger("transformers.modeling_flax_utils") with CaptureLogger(logger) as cl: FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-pt-safetensors-bf16") self.assertTrue( "Some of the weights of FlaxBertModel were initialized in bfloat16 precision from the model checkpoint" in cl.out ) @require_torch @require_safetensors @is_pt_flax_cross_test def test_from_pt_bf16(self): model = BertModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only") model.to(torch.bfloat16) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, safe_serialization=False) logger = logging.get_logger("transformers.modeling_flax_utils") with CaptureLogger(logger) as cl: new_model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-pt-safetensors-bf16") self.assertTrue( "Some of the weights of FlaxBertModel were initialized in bfloat16 precision from the model checkpoint" in cl.out ) flat_params_1 = flatten_dict(new_model.params) for value in flat_params_1.values(): self.assertEqual(value.dtype, "bfloat16")
transformers/tests/utils/test_modeling_flax_utils.py/0
{ "file_path": "transformers/tests/utils/test_modeling_flax_utils.py", "repo_id": "transformers", "token_count": 7105 }
# coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Utility that checks whether the copies defined in the library match the original or not. This includes: - All code commented with `# Copied from` comments, - The list of models in the main README.md matches the ones in the localized READMEs, - Files that are registered as full copies of one another in the `FULL_COPIES` constant of this script. This also checks the list of models in the README is complete (has all models) and add a line to complete if there is a model missing. Use from the root of the repo with: ```bash python utils/check_copies.py ``` for a check that will error in case of inconsistencies (used by `make repo-consistency`) or ```bash python utils/check_copies.py --fix_and_overwrite ``` for a check that will fix all inconsistencies automatically (used by `make fix-copies`). """ import argparse import glob import os import re import subprocess from collections import OrderedDict from typing import List, Optional, Tuple, Union from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py TRANSFORMERS_PATH = "src/transformers" MODEL_TEST_PATH = "tests/models" PATH_TO_DOCS = "docs/source/en" REPO_PATH = "." # Mapping for files that are full copies of others (keys are copies, values the file to keep them up to data with) FULL_COPIES = { "examples/tensorflow/question-answering/utils_qa.py": "examples/pytorch/question-answering/utils_qa.py", "examples/flax/question-answering/utils_qa.py": "examples/pytorch/question-answering/utils_qa.py", } LOCALIZED_READMES = { # If the introduction or the conclusion of the list change, the prompts may need to be updated. "README.md": { "start_prompt": "🤗 Transformers currently provides the following architectures", "end_prompt": "1. Want to contribute a new model?", "format_model_list": ( "**[{title}]({model_link})** (from {paper_affiliations}) released with the paper {paper_title_link} by" " {paper_authors}.{supplements}" ), }, "README_zh-hans.md": { "start_prompt": "🤗 Transformers 目前支持如下的架构", "end_prompt": "1. 想要贡献新的模型?", "format_model_list": ( "**[{title}]({model_link})** (来自 {paper_affiliations}) 伴随论文 {paper_title_link} 由 {paper_authors}" " 发布。{supplements}" ), }, "README_zh-hant.md": { "start_prompt": "🤗 Transformers 目前支援以下的架構", "end_prompt": "1. 想要貢獻新的模型?", "format_model_list": ( "**[{title}]({model_link})** (from {paper_affiliations}) released with the paper {paper_title_link} by" " {paper_authors}.{supplements}" ), }, "README_ko.md": { "start_prompt": "🤗 Transformers는 다음 모델들을 제공합니다", "end_prompt": "1. 새로운 모델을 올리고 싶나요?", "format_model_list": ( "**[{title}]({model_link})** ({paper_affiliations} 에서 제공)은 {paper_authors}.{supplements}의" " {paper_title_link}논문과 함께 발표했습니다." ), }, "README_es.md": { "start_prompt": "🤗 Transformers actualmente proporciona las siguientes arquitecturas", "end_prompt": "1. 
¿Quieres aportar un nuevo modelo?", "format_model_list": ( "**[{title}]({model_link})** (from {paper_affiliations}) released with the paper {paper_title_link} by" " {paper_authors}.{supplements}" ), }, "README_ja.md": { "start_prompt": "🤗Transformersは現在、以下のアーキテクチャを提供しています", "end_prompt": "1. 新しいモデルを投稿したいですか?", "format_model_list": ( "**[{title}]({model_link})** ({paper_affiliations} から) {paper_authors}.{supplements} から公開された研究論文" " {paper_title_link}" ), }, "README_hd.md": { "start_prompt": "🤗 ट्रांसफॉर्मर वर्तमान में निम्नलिखित आर्किटेक्चर का समर्थन करते हैं", "end_prompt": "1. एक नए मॉडल में योगदान देना चाहते हैं?", "format_model_list": ( "**[{title}]({model_link})** ({paper_affiliations} से) {paper_authors}.{supplements} द्वारा" "अनुसंधान पत्र {paper_title_link} के साथ जारी किया गया" ), }, "README_ru.md": { "start_prompt": "🤗 В настоящее время Transformers предоставляет следующие архитектуры", "end_prompt": "1. Хотите внести новую модель?", "format_model_list": ( "**[{title}]({model_link})** (from {paper_affiliations}) released with the paper {paper_title_link} by" " {paper_authors}.{supplements}" ), }, "README_pt-br.md": { "start_prompt": "🤗 Transformers atualmente fornece as seguintes arquiteturas", "end_prompt": "1. Quer contribuir com um novo modelo?", "format_model_list": ( "**[{title}]({model_link})** (from {paper_affiliations}) released with the paper {paper_title_link} by" " {paper_authors}.{supplements}" ), }, "README_te.md": { "start_prompt": "🤗 ట్రాన్స్‌ఫార్మర్లు ప్రస్తుతం కింది ఆర్కిటెక్చర్‌లను అందజేస్తున్నాయి", "end_prompt": "1. కొత్త మోడల్‌ను అందించాలనుకుంటున్నారా?", "format_model_list": ( "**[{title}]({model_link})** (from {paper_affiliations}) released with the paper {paper_title_link} by" " {paper_authors}.{supplements}" ), }, "README_fr.md": { "start_prompt": "🤗 Transformers fournit actuellement les architectures suivantes", "end_prompt": "1. Vous souhaitez contribuer avec un nouveau modèle ?", "format_model_list": ( "**[{title}]({model_link})** (de {paper_affiliations}) publié dans l'article {paper_title_link} par" "{paper_authors}.{supplements}" ), }, "README_de.md": { "start_prompt": "🤗 Transformers bietet derzeit die folgenden Architekturen an", "end_prompt": "1. Möchten Sie ein neues Modell beitragen?", "format_model_list": ( "**[{title}]({model_link})** (from {paper_affiliations}) released with the paper {paper_title_link} by" " {paper_authors}.{supplements}" ), }, "README_vi.md": { "start_prompt": "🤗 Transformers hiện đang cung cấp các kiến trúc sau đây", "end_prompt": "1. Muốn đóng góp một mô hình mới?", "format_model_list": ( "**[{title}]({model_link})** (từ {paper_affiliations}) được phát hành với bài báo {paper_title_link} by" " {paper_authors}.{supplements}" ), }, } # This is to make sure the transformers module imported is the one in the repo. transformers_module = direct_transformers_import(TRANSFORMERS_PATH) def _is_definition_header_ending_line(line: str) -> bool: # Helper function. Returns `True` if `line` is the end parenthesis of a class/function definition return re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None def _should_continue(line: str, indent: str) -> bool: # Helper function. 
Returns `True` if `line` is empty, starts with the `indent` or is the end parenthesis of a # class/function definition return line.startswith(indent) or len(line.strip()) == 0 or _is_definition_header_ending_line(line) def _sanity_check_splits(splits_1, splits_2, is_class, filename): """Check the two (inner) block structures of the corresponding code block given by `split_code_into_blocks` match. For the case of `class`, they must be of one of the following 3 cases: - a single block without name: class foo: a = 1 - a consecutive sequence of (1 or more) blocks with name class foo: def f(x): return x - a block without name, followed by a consecutive sequence of (1 or more) blocks with name class foo: a = 1 def f(x): return x def g(x): return None The 2 code snippets that give `splits_1` and `splits_2` have to be in the same case to pass this check, but the number of blocks with name in the consecutive sequence is not taken into account. For the case of `function or method`, we don't require it to be in one of the above 3 cases. However, the structure of`splits_1` and `splits_2` have to match exactly. In particular, the number of blocks with name in a consecutive sequence is taken into account. """ block_names_1 = [] block_names_2 = [] for block in splits_1[1:]: if block[0].startswith("_block_without_name_"): block_names_1.append("block_without_name") elif not block[0].startswith("_empty_block_") and ( not is_class or len(block_names_1) == 0 or block_names_1[-1].startswith("block_without_name") ): block_names_1.append("block_with_name") for block in splits_2[1:]: if block[0].startswith("_block_without_name_"): block_names_2.append("block_without_name") elif not block[0].startswith("_empty_block_") and ( not is_class or len(block_names_2) == 0 or block_names_2[-1].startswith("block_without_name") ): block_names_2.append("block_with_name") if is_class: if block_names_1 not in [ ["block_without_name"], ["block_with_name"], ["block_without_name", "block_with_name"], ]: raise ValueError( f"""Class defined in {filename} doesn't have the expected stucture. See the docstring of `_sanity_check_splits` in the file `utils/check_copies.py`""", ) if block_names_1 != block_names_2: raise ValueError(f"In {filename}, two code blocks expected to be copies have different structures.") def find_block_end(lines: List[str], start_index: int, indent: int) -> int: """ Find the end of the class/func block starting at `start_index` in a source code (defined by `lines`). Args: lines (`List[str]`): The source code, represented by a list of lines. start_index (`int`): The starting index of the target class/func block. indent (`int`): The indent of the class/func body. Returns: `int`: The index of the block's ending line plus by 1 (i.e. exclusive). """ indent = " " * indent # enter the block body line_index = start_index + 1 while line_index < len(lines) and _should_continue(lines[line_index], indent): line_index += 1 # Clean up empty lines at the end (if any). while len(lines[line_index - 1]) <= 1: line_index -= 1 return line_index def split_code_into_blocks( lines: List[str], start_index: int, end_index: int, indent: int, backtrace: bool = False ) -> List[Tuple[str, int, int]]: """ Split the class/func block starting at `start_index` in a source code (defined by `lines`) into *inner blocks*. The block's header is included as the first element. The contiguous regions (without empty lines) that are not inside any inner block are included as blocks. 
The contiguous regions of empty lines that are not inside any inner block are also included as (dummy) blocks. Args: lines (`List[str]`): The source code, represented by a list of lines. start_index (`int`): The starting index of the target class/func block. end_index (`int`): The ending index of the target class/func block. indent (`int`): The indent of the class/func body. backtrace (`bool`, *optional*, defaults to `False`): Whether or not to include the lines before the inner class/func block's header (e.g. comments, decorators, etc.) until an empty line is encountered. Returns: `List[Tuple[str, int, int]]`: A list of elements with the form `(block_name, start_index, end_index)`. """ splits = [] # `indent - 4` is the indent level of the target class/func header try: target_block_name = re.search( rf"^{' ' * (indent - 4)}((class|def)\s+\S+)(\(|\:)", lines[start_index] ).groups()[0] except Exception: start_context = min(start_index - 10, 0) end_context = min(end_index + 10, len(lines)) raise ValueError( f"Tried to split a class or function. It did not work. Error comes from line {start_index}: \n```\n" + "".join(lines[start_context:end_context]) + "```\n" ) # from now on, the `block` means inner blocks unless explicitly specified indent_str = " " * indent block_without_name_idx = 0 empty_block_idx = 0 # Find the lines for the definition header index = start_index if "(" in lines[start_index] and "):" not in lines[start_index] in lines[start_index]: while index < end_index: if _is_definition_header_ending_line(lines[index]): break index += 1 # the first line outside the definition header index += 1 splits.append((target_block_name, start_index, index)) block_start_index, prev_block_end_index = index, index while index < end_index: # if found, it will be an inner block block_found = re.search(rf"^{indent_str}((class|def)\s+\S+)(\(|\:)", lines[index]) if block_found: name = block_found.groups()[0] block_end_index = find_block_end(lines, index, indent + 4) # backtrace to include the lines before the found block's definition header (e.g. comments, decorators, # etc.) until an empty line is encountered. block_start_index = index if index > prev_block_end_index and backtrace: idx = index - 1 for idx in range(index - 1, prev_block_end_index - 2, -1): if not (len(lines[idx].strip()) > 0 and lines[idx].startswith(indent_str)): break idx += 1 if idx < index: block_start_index = idx # between the current found block and the previous found block if block_start_index > prev_block_end_index: # give it a dummy name if len("".join(lines[prev_block_end_index:block_start_index]).strip()) == 0: prev_block_name = f"_empty_block_{empty_block_idx}" empty_block_idx += 1 else: prev_block_name = f"_block_without_name_{block_without_name_idx}" block_without_name_idx += 1 # Add it as a block splits.append((prev_block_name, prev_block_end_index, block_start_index)) # Add the current found block splits.append((name, block_start_index, block_end_index)) prev_block_end_index = block_end_index index = block_end_index - 1 index += 1 if index > prev_block_end_index: if len("".join(lines[prev_block_end_index:index]).strip()) == 0: prev_block_name = f"_empty_block_{empty_block_idx}" else: prev_block_name = f"_block_without_name_{block_without_name_idx}" splits.append((prev_block_name, prev_block_end_index, index)) return splits def find_code_in_transformers( object_name: str, base_path: str = None, return_indices: bool = False ) -> Union[str, Tuple[List[str], int, int]]: """ Find and return the source code of an object. 
Args: object_name (`str`): The name of the object we want the source code of. base_path (`str`, *optional*): The path to the base folder where files are checked. If not set, it will be set to `TRANSFORMERS_PATH`. return_indices(`bool`, *optional*, defaults to `False`): If `False`, will only return the code (as a string), otherwise it will also return the whole lines of the file where the object specified by `object_name` is defined, together the start/end indices of the block in the file that defines the object. Returns: `Union[str, Tuple[List[str], int, int]]`: If `return_indices=False`, only the source code of the object will be returned. Otherwise, it also returns the whole lines of the file where the object specified by `object_name` is defined, together the start/end indices of the block in the file that defines the object. """ parts = object_name.split(".") i = 0 # We can't set this as the default value in the argument, otherwise `CopyCheckTester` will fail, as it uses a # patched temp directory. if base_path is None: base_path = TRANSFORMERS_PATH # Detail: the `Copied from` statement is originally designed to work with the last part of `TRANSFORMERS_PATH`, # (which is `transformers`). The same should be applied for `MODEL_TEST_PATH`. However, its last part is `models` # (to only check and search in it) which is a bit confusing. So we keep the copied statement staring with # `tests.models.` and change it to `tests` here. if base_path == MODEL_TEST_PATH: base_path = "tests" # First let's find the module where our object lives. module = parts[i] while i < len(parts) and not os.path.isfile(os.path.join(base_path, f"{module}.py")): i += 1 if i < len(parts): module = os.path.join(module, parts[i]) if i >= len(parts): raise ValueError( f"`object_name` should begin with the name of a module of transformers but got {object_name}." ) with open(os.path.join(base_path, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f: lines = f.readlines() # Now let's find the class / func in the code! indent = "" line_index = 0 for name in parts[i + 1 :]: while ( line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None ): line_index += 1 # find the target specified in the current level in `parts` -> increase `indent` so we can search the next indent += " " # the index of the first line in the (currently found) block *body* line_index += 1 if line_index >= len(lines): raise ValueError(f" {object_name} does not match any function or class in {module}.") # `indent` is already one level deeper than the (found) class/func block's definition header # We found the beginning of the class / func, now let's find the end (when the indent diminishes). # `start_index` is the index of the class/func block's definition header start_index = line_index - 1 end_index = find_block_end(lines, start_index, len(indent)) code = "".join(lines[start_index:end_index]) return (code, (lines, start_index, end_index)) if return_indices else code def replace_code(code: str, replace_pattern: str) -> str: """Replace `code` by a pattern of the form `with X1->X2,Y1->Y2,Z1->Z2`. Args: code (`str`): The code to be modified. replace_pattern (`str`): The pattern used to modify `code`. Returns: `str`: The modified code. 
""" if len(replace_pattern) > 0: patterns = replace_pattern.replace("with", "").split(",") patterns = [_re_replace_pattern.search(p) for p in patterns] for pattern in patterns: if pattern is None: continue obj1, obj2, option = pattern.groups() code = re.sub(obj1, obj2, code) if option.strip() == "all-casing": code = re.sub(obj1.lower(), obj2.lower(), code) code = re.sub(obj1.upper(), obj2.upper(), code) return code def find_code_and_splits(object_name: str, base_path: str, buffer: dict = None): """Find the code of an object (specified by `object_name`) and split it into blocks. Args: object_name (`str`): The name of the object, e.g. `transformers.models.bert.modeling_bert.BertAttention` or `tests.models.llama.test_modeling_llama.LlamaModelTest.test_config`. base_path (`str`): The path to the base directory within which the search will be performed. It could be either `TRANSFORMERS_PATH` or `MODEL_TEST_PATH`. buffer (`dict`, *optional*): The buffer used to store the previous results in order to speed up the process. Returns: lines (`List[str]`): The lines of the whole file where the object is defined. code (`str`): The object's code. code_splits (`List[Tuple[str, int, int]]`): `code` splitted into blocks. See `split_code_into_blocks`. """ if buffer is None: buffer = {} if (object_name, base_path) in buffer: lines, code, code_splits = buffer[(object_name, base_path)] else: code, (lines, target_start_index, target_end_index) = find_code_in_transformers( object_name, base_path=base_path, return_indices=True ) indent = get_indent(code) # Split the code into blocks # `indent` is the indent of the class/func definition header, but `code_splits` expects the indent level of the # block body. code_splits = split_code_into_blocks( lines, target_start_index, target_end_index, len(indent) + 4, backtrace=True ) buffer[(object_name, base_path)] = lines, code, code_splits return lines, code, code_splits _re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+transformers\.(\S+\.\S+)\s*($|\S.*$)") _re_copy_warning_for_test_file = re.compile(r"^(\s*)#\s*Copied from\s+tests\.(\S+\.\S+)\s*($|\S.*$)") _re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)") _re_fill_pattern = re.compile(r"<FILL\s+[^>]*>") def get_indent(code: str) -> str: """ Find the indent in the first non empty line in a code sample. Args: code (`str`): The code to inspect. Returns: `str`: The indent looked at (as string). """ lines = code.split("\n") idx = 0 while idx < len(lines) and len(lines[idx]) == 0: idx += 1 if idx < len(lines): return re.search(r"^(\s*)\S", lines[idx]).groups()[0] return "" def run_ruff(code, check=False): if check: command = ["ruff", "check", "-", "--fix", "--exit-zero"] else: command = ["ruff", "format", "-", "--config", "pyproject.toml", "--silent"] process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) stdout, _ = process.communicate(input=code.encode()) return stdout.decode() def stylify(code: str) -> str: """ Applies the ruff part of our `make style` command to some code. This formats the code using `ruff format`. As `ruff` does not provide a python api this cannot be done on the fly. Args: code (`str`): The code to format. Returns: `str`: The formatted code. 
""" has_indent = len(get_indent(code)) > 0 if has_indent: code = f"class Bla:\n{code}" formatted_code = run_ruff(code) return formatted_code[len("class Bla:\n") :] if has_indent else formatted_code def check_codes_match(observed_code: str, theoretical_code: str) -> Optional[int]: """ Checks if two version of a code match with the exception of the class/function name. Args: observed_code (`str`): The code found. theoretical_code (`str`): The code to match. Returns: `Optional[int]`: The index of the first line where there is a difference (if any) and `None` if the codes match. """ observed_code_header = observed_code.split("\n")[0] theoretical_code_header = theoretical_code.split("\n")[0] # Catch the function/class name: it is expected that those do not match. _re_class_match = re.compile(r"class\s+([^\(:]+)(?:\(|:)") _re_func_match = re.compile(r"def\s+([^\(]+)\(") for re_pattern in [_re_class_match, _re_func_match]: if re_pattern.match(observed_code_header) is not None: try: observed_obj_name = re_pattern.search(observed_code_header).groups()[0] except Exception: raise ValueError( "Tried to split a class or function. It did not work. Error comes from: \n```\n" + observed_code_header + "\n```\n" ) try: theoretical_name = re_pattern.search(theoretical_code_header).groups()[0] except Exception: raise ValueError( "Tried to split a class or function. It did not work. Error comes from: \n```\n" + theoretical_code_header + "\n```\n" ) theoretical_code_header = theoretical_code_header.replace(theoretical_name, observed_obj_name) # Find the first diff. Line 0 is special since we need to compare with the function/class names ignored. diff_index = 0 if theoretical_code_header != observed_code_header: return 0 diff_index = 1 for observed_line, theoretical_line in zip(observed_code.split("\n")[1:], theoretical_code.split("\n")[1:]): if observed_line != theoretical_line: return diff_index diff_index += 1 def is_copy_consistent(filename: str, overwrite: bool = False, buffer: dict = None) -> Optional[List[Tuple[str, int]]]: """ Check if the code commented as a copy in a file matches the original. Args: filename (`str`): The name of the file to check. overwrite (`bool`, *optional*, defaults to `False`): Whether or not to overwrite the copies when they don't match. buffer (`dict`, *optional*): The buffer used to store the previous results in order to speed up the process. Returns: `Optional[List[Tuple[str, int]]]`: If `overwrite=False`, returns the list of differences as tuples `(str, int)` with the name of the object having a diff and the line number where theere is the first diff. """ base_path = TRANSFORMERS_PATH if not filename.startswith("tests") else MODEL_TEST_PATH with open(filename, "r", encoding="utf-8", newline="\n") as f: lines = f.readlines() diffs = [] line_index = 0 # Not a for loop cause `lines` is going to change (if `overwrite=True`). search_re = _re_copy_warning_for_test_file if filename.startswith("tests") else _re_copy_warning while line_index < len(lines): search = search_re.search(lines[line_index]) if search is None: line_index += 1 continue # There is some copied code here, let's retrieve the original. 
indent, object_name, replace_pattern = search.groups() # Find the file lines, the object's code, and its blocks try: target_lines, theoretical_code, theoretical_code_splits = find_code_and_splits( object_name, base_path, buffer=buffer ) except Exception as exc: exc.args = (f"Error while trying to find source code for {filename}.\n\n" + str(exc),) raise # code replaced by the patterns theoretical_code_blocks = OrderedDict() for name, start, end in theoretical_code_splits: name = replace_code(name, replace_pattern) code = "".join(target_lines[start:end]) code = replace_code(code, replace_pattern) theoretical_code_blocks[name] = code theoretical_indent = get_indent(theoretical_code) # `start_index` is the index of the first line (the definition header) after `# Copied from`. # (`indent != theoretical_indent` doesn't seem to occur so far, not sure what this case is for.) start_index = line_index + 1 if indent == theoretical_indent else line_index # enter the block body line_index = start_index + 1 subcode = "\n".join(theoretical_code.split("\n")[1:]) indent = get_indent(subcode) # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment. # We can't call `find_block_end` directly as there is sth. special `# End copy"` here. should_continue = True while line_index < len(lines) and should_continue: line_index += 1 if line_index >= len(lines): break line = lines[line_index] # There is a special pattern `# End copy` to stop early. It's not documented cause it shouldn't really be # used. should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None # `line_index` is outside the block # Clean up empty lines at the end (if any). while len(lines[line_index - 1]) <= 1: line_index -= 1 # Split the observed code into blocks observed_code_splits = split_code_into_blocks(lines, start_index, line_index, len(indent), backtrace=True) is_class = lines[start_index].startswith(f"{' ' * (len(indent) - 4)}class ") # sanity check _sanity_check_splits(theoretical_code_splits, observed_code_splits, is_class=is_class, filename=filename) # observed code in a structured way (a dict mapping block names to blocks' code) observed_code_blocks = OrderedDict() for name, start, end in observed_code_splits: code = "".join(lines[start:end]) observed_code_blocks[name] = code # Below, we change some names in `theoretical_code_blocks` and `observed_code_blocks`. These mappings map the # original names to the modified names: this is used to restore the original order of the code blocks. name_mappings_1 = {k: k for k in theoretical_code_blocks.keys()} name_mappings_2 = {k: k for k in observed_code_blocks.keys()} # Update code blocks' name and content: # If `"# Ignore copy"` is found in a block of the observed code: # 1. if it's a block only in the observed code --> add it to the theoretical code. # 2. if it's also in the theoretical code () --> put its content (body) to the corresponding block under the # same name in the theoretical code. # In both cases, we change the name to have a prefix `_ignored_` so we know if we can discard them during the # comparison. 
ignored_existing_block_index = 0 ignored_new_block_index = 0 for name in list(observed_code_blocks.keys()): code = observed_code_blocks[name] if "# Ignore copy" in code: if name in theoretical_code_blocks: # in the target --> just copy the content del theoretical_code_blocks[name] theoretical_code_blocks[f"_ignored_existing_block_{ignored_existing_block_index}"] = code name_mappings_1[name] = f"_ignored_existing_block_{ignored_existing_block_index}" del observed_code_blocks[name] observed_code_blocks[f"_ignored_existing_block_{ignored_existing_block_index}"] = code name_mappings_2[name] = f"_ignored_existing_block_{ignored_existing_block_index}" ignored_existing_block_index += 1 else: # not in the target --> add it theoretical_code_blocks[f"_ignored_new_block_{ignored_new_block_index}"] = code name_mappings_1[f"_ignored_new_block_{ignored_new_block_index}"] = ( f"_ignored_new_block_{ignored_new_block_index}" ) del observed_code_blocks[name] observed_code_blocks[f"_ignored_new_block_{ignored_new_block_index}"] = code name_mappings_2[name] = f"_ignored_new_block_{ignored_new_block_index}" ignored_new_block_index += 1 # Respect the original block order: # 1. in `theoretical_code_blocks`: the new blocks will follow the existing ones # 2. in `observed_code_blocks`: the original order are kept with names modified potentially. This is necessary # to compute the correct `diff_index` if `overwrite=True` and there is a diff. theoretical_code_blocks = { name_mappings_1[orig_name]: theoretical_code_blocks[name_mappings_1[orig_name]] for orig_name in name_mappings_1 } observed_code_blocks = { name_mappings_2[orig_name]: observed_code_blocks[name_mappings_2[orig_name]] for orig_name in name_mappings_2 } # Ignore the blocks specified to be ignored. This is the version used to check if there is a mismatch theoretical_code_blocks_clean = { k: v for k, v in theoretical_code_blocks.items() if not (k.startswith(("_ignored_existing_block_", "_ignored_new_block_"))) } theoretical_code = "".join(list(theoretical_code_blocks_clean.values())) # stylify `theoretical_code` before compare (this is needed only when `replace_pattern` is not empty) if replace_pattern: theoretical_code = stylify(theoretical_code) # Remove `\n\n` in `theoretical_code` before compare (so no empty line) while "\n\n" in theoretical_code: theoretical_code = theoretical_code.replace("\n\n", "\n") # Compute `observed_code` where we don't include any empty line + keep track the line index between the # original/processed `observed_code` so we can have the correct `diff_index`. idx_to_orig_idx_mapping_for_observed_code_lines = {} idx = -1 orig_idx = -1 observed_code = "" for name, code in observed_code_blocks.items(): if code.endswith("\n"): code = code[:-1] for code_line in code.split("\n"): orig_idx += 1 if code_line.strip() and not name.startswith(("_ignored_existing_block_", "_ignored_new_block_")): idx += 1 observed_code += code_line + "\n" idx_to_orig_idx_mapping_for_observed_code_lines[idx] = orig_idx # Test for a diff and act accordingly. diff_index = check_codes_match(observed_code, theoretical_code) if diff_index is not None: # switch to the index in the original `observed_code` (i.e. before removing empty lines) diff_index = idx_to_orig_idx_mapping_for_observed_code_lines[diff_index] diffs.append([object_name, diff_index + start_index + 1]) if overwrite: # `theoretical_code_to_write` is a single string but may have several lines. 
theoretical_code_to_write = stylify("".join(list(theoretical_code_blocks.values()))) lines = lines[:start_index] + [theoretical_code_to_write] + lines[line_index:] # Here we treat it as a single entry in `lines`. line_index = start_index + 1 if overwrite and len(diffs) > 0: # Warn the user a file has been modified. print(f"Detected changes, rewriting {filename}.") with open(filename, "w", encoding="utf-8", newline="\n") as f: f.writelines(lines) return diffs def check_copies(overwrite: bool = False, file: str = None): """ Check every file is copy-consistent with the original. Also check the model list in the main README and other READMEs are consistent. Args: overwrite (`bool`, *optional*, defaults to `False`): Whether or not to overwrite the copies when they don't match. file (`bool`, *optional*): The path to a specific file to check and/or fix. """ buffer = {} if file is None: all_files = glob.glob(os.path.join(TRANSFORMERS_PATH, "**/*.py"), recursive=True) all_test_files = glob.glob(os.path.join(MODEL_TEST_PATH, "**/*.py"), recursive=True) all_files = list(all_files) + list(all_test_files) else: all_files = [file] diffs = [] for filename in all_files: new_diffs = is_copy_consistent(filename, overwrite, buffer) diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs] if not overwrite and len(diffs) > 0: diff = "\n".join(diffs) raise Exception( "Found the following copy inconsistencies:\n" + diff + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them." ) def check_full_copies(overwrite: bool = False): """ Check the files that are full copies of others (as indicated in `FULL_COPIES`) are copy-consistent. Args: overwrite (`bool`, *optional*, defaults to `False`): Whether or not to overwrite the copies when they don't match. """ diffs = [] for target, source in FULL_COPIES.items(): with open(source, "r", encoding="utf-8") as f: source_code = f.read() with open(target, "r", encoding="utf-8") as f: target_code = f.read() if source_code != target_code: if overwrite: with open(target, "w", encoding="utf-8") as f: print(f"Replacing the content of {target} by the one of {source}.") f.write(source_code) else: diffs.append(f"- {target}: copy does not match {source}.") if not overwrite and len(diffs) > 0: diff = "\n".join(diffs) raise Exception( "Found the following copy inconsistencies:\n" + diff + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them." ) def get_model_list(filename: str, start_prompt: str, end_prompt: str) -> str: """ Extracts the model list from a README. Args: filename (`str`): The name of the README file to check. start_prompt (`str`): The string to look for that introduces the model list. end_prompt (`str`): The string to look for that ends the model list. Returns: `str`: The model list. """ with open(os.path.join(REPO_PATH, filename), "r", encoding="utf-8", newline="\n") as f: lines = f.readlines() # Find the start of the list. start_index = 0 while not lines[start_index].startswith(start_prompt): start_index += 1 start_index += 1 result = [] current_line = "" end_index = start_index # Keep going until the end of the list. 
while not lines[end_index].startswith(end_prompt): if lines[end_index].startswith("1."): if len(current_line) > 1: result.append(current_line) current_line = lines[end_index] elif len(lines[end_index]) > 1: current_line = f"{current_line[:-1]} {lines[end_index].lstrip()}" end_index += 1 if len(current_line) > 1: result.append(current_line) return "".join(result) def convert_to_localized_md(model_list: str, localized_model_list: str, format_str: str) -> Tuple[bool, str]: """ Compare the model list from the main README to the one in a localized README. Args: model_list (`str`): The model list in the main README. localized_model_list (`str`): The model list in one of the localized README. format_str (`str`): The template for a model entry in the localized README (look at the `format_model_list` in the entries of `LOCALIZED_READMES` for examples). Returns: `Tuple[bool, str]`: A tuple where the first value indicates if the READMEs match or not, and the second value is the correct localized README. """ def _rep(match): title, model_link, paper_affiliations, paper_title_link, paper_authors, supplements = match.groups() return format_str.format( title=title, model_link=model_link, paper_affiliations=paper_affiliations, paper_title_link=paper_title_link, paper_authors=paper_authors, supplements=" " + supplements.strip() if len(supplements) != 0 else "", ) # This regex captures metadata from an English model description, including model title, model link, # affiliations of the paper, title of the paper, authors of the paper, and supplemental data (see DistilBERT for # example). _re_capture_meta = re.compile( r"\*\*\[([^\]]*)\]\(([^\)]*)\)\*\* \(from ([^)]*)\)[^\[]*([^\)]*\)).*?by (.*?[A-Za-z\*]{2,}?)\. (.*)$" ) # This regex is used to synchronize title link. _re_capture_title_link = re.compile(r"\*\*\[([^\]]*)\]\(([^\)]*)\)\*\*") # This regex is used to synchronize paper title and link. _re_capture_paper_link = re.compile(r" \[([^\]]*)\]\(([^\)]*)\)") if len(localized_model_list) == 0: localized_model_index = {} else: try: localized_model_index = { re.search(r"\*\*\[([^\]]*)", line).groups()[0]: line for line in localized_model_list.strip().split("\n") } except AttributeError: raise AttributeError("A model name in localized READMEs cannot be recognized.") model_keys = [re.search(r"\*\*\[([^\]]*)", line).groups()[0] for line in model_list.strip().split("\n")] # We exclude keys in localized README not in the main one. readmes_match = not any(k not in model_keys for k in localized_model_index) localized_model_index = {k: v for k, v in localized_model_index.items() if k in model_keys} for model in model_list.strip().split("\n"): title, model_link = _re_capture_title_link.search(model).groups() if title not in localized_model_index: readmes_match = False # Add an anchor white space behind a model description string for regex. # If metadata cannot be captured, the English version will be directly copied. 
localized_model_index[title] = _re_capture_meta.sub(_rep, model + " ") elif _re_fill_pattern.search(localized_model_index[title]) is not None: update = _re_capture_meta.sub(_rep, model + " ") if update != localized_model_index[title]: readmes_match = False localized_model_index[title] = update else: # Synchronize title link converted_model = _re_capture_title_link.sub( f"**[{title}]({model_link})**", localized_model_index[title], count=1 ) # Synchronize paper title and its link (if found) paper_title_link = _re_capture_paper_link.search(model) if paper_title_link is not None: paper_title, paper_link = paper_title_link.groups() converted_model = _re_capture_paper_link.sub( f" [{paper_title}]({paper_link})", converted_model, count=1 ) if converted_model != localized_model_index[title]: readmes_match = False localized_model_index[title] = converted_model sorted_index = sorted(localized_model_index.items(), key=lambda x: x[0].lower()) return readmes_match, "\n".join((x[1] for x in sorted_index)) + "\n" def _find_text_in_file(filename: str, start_prompt: str, end_prompt: str) -> Tuple[str, int, int, List[str]]: """ Find the text in a file between two prompts. Args: filename (`str`): The name of the file to look into. start_prompt (`str`): The string to look for that introduces the content looked for. end_prompt (`str`): The string to look for that ends the content looked for. Returns: Tuple[str, int, int, List[str]]: The content between the two prompts, the index of the start line in the original file, the index of the end line in the original file and the list of lines of that file. """ with open(filename, "r", encoding="utf-8", newline="\n") as f: lines = f.readlines() # Find the start prompt. start_index = 0 while not lines[start_index].startswith(start_prompt): start_index += 1 start_index += 1 end_index = start_index while not lines[end_index].startswith(end_prompt): end_index += 1 end_index -= 1 while len(lines[start_index]) <= 1: start_index += 1 while len(lines[end_index]) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index]), start_index, end_index, lines # Map a model name with the name it has in the README for the check_readme check SPECIAL_MODEL_NAMES = { "Bert Generation": "BERT For Sequence Generation", "BigBird": "BigBird-RoBERTa", "Data2VecAudio": "Data2Vec", "Data2VecText": "Data2Vec", "Data2VecVision": "Data2Vec", "DonutSwin": "Swin Transformer", "Marian": "MarianMT", "MaskFormerSwin": "Swin Transformer", "OpenAI GPT-2": "GPT-2", "OpenAI GPT": "GPT", "Perceiver": "Perceiver IO", "SAM": "Segment Anything", "ViT": "Vision Transformer (ViT)", } # Update this list with the models that shouldn't be in the README. This only concerns modular models or those who do # not have an associated paper. MODELS_NOT_IN_README = [ "BertJapanese", "Encoder decoder", "FairSeq Machine-Translation", "HerBERT", "RetriBERT", "Speech Encoder decoder", "Speech2Text", "Speech2Text2", "TimmBackbone", "Vision Encoder decoder", "VisionTextDualEncoder", "CLIPVisionModel", "SiglipVisionModel", "ChineseCLIPVisionModel", "VitPoseBackbone", ] # Template for new entries to add in the main README when we have missing models. README_TEMPLATE = ( "1. **[{model_name}](https://huggingface.co/docs/main/transformers/model_doc/{model_type})** (from " "<FILL INSTITUTION>) released with the paper [<FILL PAPER TITLE>](<FILL ARKIV LINK>) by <FILL AUTHORS>." 
) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--file", type=str, default=None, help="A specific file to check and/or fix") parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.") args = parser.parse_args() check_copies(args.fix_and_overwrite, args.file) check_full_copies(args.fix_and_overwrite)
transformers/utils/check_copies.py/0
{ "file_path": "transformers/utils/check_copies.py", "repo_id": "transformers", "token_count": 20623 }
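The `# Copied from` mechanism handled by `check_copies.py` above is easiest to see with a tiny example. The sketch below is a hypothetical, standalone illustration of how `replace_code` applies a `with X1->X2` pattern taken from a `# Copied from` comment; the class body is made up, and importing `utils.check_copies` assumes the snippet is run from the repository root so the module is importable.

```python
# Hypothetical illustration of how a replace pattern from a comment such as
#   # Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->Roberta
# is applied to the original ("theoretical") code by replace_code().
# Assumes execution from the repository root so `utils.check_copies` is importable.
from utils.check_copies import replace_code

theoretical_code = (
    "class BertSelfOutput(nn.Module):\n"
    "    # BertSelfOutput projects the attention output\n"
)

# The pattern string is everything after the object name in the `# Copied from` comment.
print(replace_code(theoretical_code, "with Bert->Roberta"))
# class RobertaSelfOutput(nn.Module):
#     # RobertaSelfOutput projects the attention output
```

Adding `,all-casing` after a pattern would additionally apply the lower- and upper-case variants of the same substitution, as implemented in `replace_code`.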
""" Script which deprecates a list of given models Example usage: python utils/deprecate_models.py --models bert distilbert """ import argparse import os from collections import defaultdict from pathlib import Path from typing import Optional, Tuple import requests from custom_init_isort import sort_imports_in_all_inits from git import Repo from packaging import version from transformers import CONFIG_MAPPING, logging from transformers import __version__ as current_version REPO_PATH = Path(os.path.abspath(os.path.dirname(os.path.dirname(__file__)))) repo = Repo(REPO_PATH) logger = logging.get_logger(__name__) def get_last_stable_minor_release(): # Get the last stable release of transformers url = "https://pypi.org/pypi/transformers/json" release_data = requests.get(url).json() # Find the last stable release of of transformers (version below current version) major_version, minor_version, patch_version, _ = current_version.split(".") last_major_minor = f"{major_version}.{int(minor_version) - 1}" last_stable_minor_releases = [ release for release in release_data["releases"] if release.startswith(last_major_minor) ] last_stable_release = sorted(last_stable_minor_releases, key=version.parse)[-1] return last_stable_release def build_tip_message(last_stable_release): return ( """ <Tip warning={true}> This model is in maintenance mode only, we don't accept any new PRs changing its code. """ + f"""If you run into any issues running this model, please reinstall the last version that supported this model: v{last_stable_release}. You can do so by running the following command: `pip install -U transformers=={last_stable_release}`. </Tip>""" ) def insert_tip_to_model_doc(model_doc_path, tip_message): tip_message_lines = tip_message.split("\n") with open(model_doc_path, "r") as f: model_doc = f.read() # Add the tip message to the model doc page directly underneath the title lines = model_doc.split("\n") new_model_lines = [] for line in lines: if line.startswith("# "): new_model_lines.append(line) new_model_lines.extend(tip_message_lines) else: new_model_lines.append(line) with open(model_doc_path, "w") as f: f.write("\n".join(new_model_lines)) def get_model_doc_path(model: str) -> Tuple[Optional[str], Optional[str]]: # Possible variants of the model name in the model doc path model_names = [model, model.replace("_", "-"), model.replace("_", "")] model_doc_paths = [REPO_PATH / f"docs/source/en/model_doc/{model_name}.md" for model_name in model_names] for model_doc_path, model_name in zip(model_doc_paths, model_names): if os.path.exists(model_doc_path): return model_doc_path, model_name return None, None def extract_model_info(model): model_info = {} model_doc_path, model_doc_name = get_model_doc_path(model) model_path = REPO_PATH / f"src/transformers/models/{model}" if model_doc_path is None: print(f"Model doc path does not exist for {model}") return None model_info["model_doc_path"] = model_doc_path model_info["model_doc_name"] = model_doc_name if not os.path.exists(model_path): print(f"Model path does not exist for {model}") return None model_info["model_path"] = model_path return model_info def update_relative_imports(filename, model): with open(filename, "r") as f: filelines = f.read() new_file_lines = [] for line in filelines.split("\n"): if line.startswith("from .."): new_file_lines.append(line.replace("from ..", "from ...")) else: new_file_lines.append(line) with open(filename, "w") as f: f.write("\n".join(new_file_lines)) def remove_copied_from_statements(model): model_path = REPO_PATH / 
f"src/transformers/models/{model}" for file in os.listdir(model_path): if file == "__pycache__": continue file_path = model_path / file with open(file_path, "r") as f: file_lines = f.read() new_file_lines = [] for line in file_lines.split("\n"): if "# Copied from" in line: continue new_file_lines.append(line) with open(file_path, "w") as f: f.write("\n".join(new_file_lines)) def move_model_files_to_deprecated(model): model_path = REPO_PATH / f"src/transformers/models/{model}" deprecated_model_path = REPO_PATH / f"src/transformers/models/deprecated/{model}" if not os.path.exists(deprecated_model_path): os.makedirs(deprecated_model_path) for file in os.listdir(model_path): if file == "__pycache__": continue repo.git.mv(f"{model_path}/{file}", f"{deprecated_model_path}/{file}") # For deprecated files, we then need to update the relative imports update_relative_imports(f"{deprecated_model_path}/{file}", model) def delete_model_tests(model): tests_path = REPO_PATH / f"tests/models/{model}" if os.path.exists(tests_path): repo.git.rm("-r", tests_path) def get_line_indent(s): return len(s) - len(s.lstrip()) def update_main_init_file(models): """ Replace all instances of model.model_name with model.deprecated.model_name in the __init__.py file Args: models (List[str]): The models to mark as deprecated """ filename = REPO_PATH / "src/transformers/__init__.py" with open(filename, "r") as f: init_file = f.read() # 1. For each model, find all the instances of model.model_name and replace with model.deprecated.model_name for model in models: init_file = init_file.replace(f'models.{model}"', f'models.deprecated.{model}"') init_file = init_file.replace(f"models.{model} import", f"models.deprecated.{model} import") with open(filename, "w") as f: f.write(init_file) # 2. Resort the imports sort_imports_in_all_inits(check_only=False) def remove_model_references_from_file(filename, models, condition): """ Remove all references to the given models from the given file Args: filename (str): The file to remove the references from models (List[str]): The models to remove condition (Callable): A function that takes the line and model and returns True if the line should be removed """ filename = REPO_PATH / filename with open(filename, "r") as f: init_file = f.read() new_file_lines = [] for i, line in enumerate(init_file.split("\n")): if any(condition(line, model) for model in models): continue new_file_lines.append(line) with open(filename, "w") as f: f.write("\n".join(new_file_lines)) def remove_model_config_classes_from_config_check(model_config_classes): """ Remove the deprecated model config classes from the check_config_attributes.py file Args: model_config_classes (List[str]): The model config classes to remove e.g. 
["BertConfig", "DistilBertConfig"] """ filename = REPO_PATH / "utils/check_config_attributes.py" with open(filename, "r") as f: check_config_attributes = f.read() # Keep track as we have to delete comment above too in_special_cases_to_allow = False in_indent = False new_file_lines = [] for line in check_config_attributes.split("\n"): indent = get_line_indent(line) if (line.strip() == "SPECIAL_CASES_TO_ALLOW = {") or (line.strip() == "SPECIAL_CASES_TO_ALLOW.update("): in_special_cases_to_allow = True elif in_special_cases_to_allow and indent == 0 and line.strip() in ("}", ")"): in_special_cases_to_allow = False if in_indent: if line.strip().endswith(("]", "],")): in_indent = False continue if in_special_cases_to_allow and any( model_config_class in line for model_config_class in model_config_classes ): # Remove comments above the model config class to remove while new_file_lines[-1].strip().startswith("#"): new_file_lines.pop() if line.strip().endswith("["): in_indent = True continue elif any(model_config_class in line for model_config_class in model_config_classes): continue new_file_lines.append(line) with open(filename, "w") as f: f.write("\n".join(new_file_lines)) def add_models_to_deprecated_models_in_config_auto(models): """ Add the models to the DEPRECATED_MODELS list in configuration_auto.py and sorts the list to be in alphabetical order. """ filepath = REPO_PATH / "src/transformers/models/auto/configuration_auto.py" with open(filepath, "r") as f: config_auto = f.read() new_file_lines = [] deprecated_models_list = [] in_deprecated_models = False for line in config_auto.split("\n"): if line.strip() == "DEPRECATED_MODELS = [": in_deprecated_models = True new_file_lines.append(line) elif in_deprecated_models and line.strip() == "]": in_deprecated_models = False # Add the new models to deprecated models list deprecated_models_list.extend([f' "{model}", ' for model in models]) # Sort so they're in alphabetical order in the file deprecated_models_list = sorted(deprecated_models_list) new_file_lines.extend(deprecated_models_list) # Make sure we still have the closing bracket new_file_lines.append(line) elif in_deprecated_models: deprecated_models_list.append(line) else: new_file_lines.append(line) with open(filepath, "w") as f: f.write("\n".join(new_file_lines)) def deprecate_models(models): # Get model info skipped_models = [] models_info = defaultdict(dict) for model in models: single_model_info = extract_model_info(model) if single_model_info is None: skipped_models.append(model) else: models_info[model] = single_model_info model_config_classes = [] for model, model_info in models_info.items(): if model in CONFIG_MAPPING: model_config_classes.append(CONFIG_MAPPING[model].__name__) elif model_info["model_doc_name"] in CONFIG_MAPPING: model_config_classes.append(CONFIG_MAPPING[model_info["model_doc_name"]].__name__) else: skipped_models.append(model) print(f"Model config class not found for model: {model}") # Filter out skipped models models = [model for model in models if model not in skipped_models] if skipped_models: print(f"Skipped models: {skipped_models} as the model doc or model path could not be found.") print(f"Models to deprecate: {models}") # Remove model config classes from config check print("Removing model config classes from config checks") remove_model_config_classes_from_config_check(model_config_classes) tip_message = build_tip_message(get_last_stable_minor_release()) for model, model_info in models_info.items(): print(f"Processing model: {model}") # Add the tip message 
to the model doc page directly underneath the title print("Adding tip message to model doc page") insert_tip_to_model_doc(model_info["model_doc_path"], tip_message) # Remove #Copied from statements from model's files print("Removing #Copied from statements from model's files") remove_copied_from_statements(model) # Move the model file to deprecated: src/transfomers/models/model -> src/transformers/models/deprecated/model print("Moving model files to deprecated for model") move_model_files_to_deprecated(model) # Delete the model tests: tests/models/model print("Deleting model tests") delete_model_tests(model) # # We do the following with all models passed at once to avoid having to re-write the file multiple times print("Updating __init__.py file to point to the deprecated models") update_main_init_file(models) # Remove model references from other files print("Removing model references from other files") remove_model_references_from_file( "src/transformers/models/__init__.py", models, lambda line, model: model == line.strip().strip(",") ) remove_model_references_from_file( "utils/slow_documentation_tests.txt", models, lambda line, model: "/" + model + "/" in line ) remove_model_references_from_file("utils/not_doctested.txt", models, lambda line, model: "/" + model + "/" in line) # Add models to DEPRECATED_MODELS in the configuration_auto.py print("Adding models to DEPRECATED_MODELS in configuration_auto.py") add_models_to_deprecated_models_in_config_auto(models) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--models", nargs="+", help="List of models to deprecate") args = parser.parse_args() deprecate_models(args.models)
transformers/utils/deprecate_models.py/0
{ "file_path": "transformers/utils/deprecate_models.py", "repo_id": "transformers", "token_count": 5241 }
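As a quick illustration of the relative-import fix performed by `update_relative_imports` in the script above: moving a model from `models/<model>/` to `models/deprecated/<model>/` puts every file one package level deeper, so each relative import needs one extra leading dot. The snippet below is a standalone sketch of that string rewrite; the import line is a made-up example.

```python
# Standalone sketch of the rewrite done by update_relative_imports(): any line
# starting with "from .." gains one extra leading dot, because the deprecated
# model package sits one directory deeper than before.
line = "from ...utils import logging"            # as written under models/<model>/
rewritten = line.replace("from ..", "from ...")  # same call the script performs
print(rewritten)                                 # -> "from ....utils import logging"
```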
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This should help you prepare a patch, automatically extracting the commits to cherry-pick in chronological order to avoid merge conflicts. An equivalent way to do this is to use `git log --pretty=oneline HEAD...v4.41.0` and grep. Potential TODO: automatically cherry-picks them. Pass in a list of PR: `python utils/patch_helper.py --prs 31108 31054 31008 31010 31004` will produce the following: ```bash Skipping invalid version tag: list Skipping invalid version tag: localattn1 Git cherry-pick commands to run: git cherry-pick 03935d300d60110bb86edb49d2315089cfb19789 #2024-05-24 11:00:59+02:00 git cherry-pick bdb9106f247fca48a71eb384be25dbbd29b065a8 #2024-05-24 19:02:55+02:00 git cherry-pick 84c4b72ee99e8e65a8a5754a5f9d6265b45cf67e #2024-05-27 10:34:14+02:00 git cherry-pick 936ab7bae5e040ec58994cb722dd587b9ab26581 #2024-05-28 11:56:05+02:00 git cherry-pick 0bef4a273825d2cfc52ddfe62ba486ee61cc116f #2024-05-29 13:33:26+01:00 ``` """ import argparse from git import GitCommandError, Repo from packaging import version def get_merge_commit(repo, pr_number, since_tag): try: # Use git log to find the merge commit for the PR within the given tag range merge_commit = next(repo.iter_commits(f"v{since_tag}...origin/main", grep=f"#{pr_number}")) return merge_commit except StopIteration: print(f"No merge commit found for PR #{pr_number} between tags {since_tag} and {main}") return None except GitCommandError as e: print(f"Error finding merge commit for PR #{pr_number}: {str(e)}") return None def main(pr_numbers): repo = Repo(".") # Initialize the Repo object for the current directory merge_commits = [] tags = {} for tag in repo.tags: try: # Parse and sort tags, skip invalid ones tag_ver = version.parse(tag.name) tags[tag_ver] = tag except Exception: print(f"Skipping invalid version tag: {tag.name}") last_tag = sorted(tags)[-1] major_minor = f"{last_tag.major}.{last_tag.minor}.0" # Iterate through tag ranges to find the merge commits for pr in pr_numbers: pr = pr.split("https://github.com/huggingface/transformers/pull/")[-1] commit = get_merge_commit(repo, pr, major_minor) if commit: merge_commits.append(commit) # Sort commits by date merge_commits.sort(key=lambda commit: commit.committed_datetime) # Output the git cherry-pick commands print("Git cherry-pick commands to run:") for commit in merge_commits: print(f"git cherry-pick {commit.hexsha} #{commit.committed_datetime}") if __name__ == "__main__": parser = argparse.ArgumentParser(description="Find and sort merge commits for specified PRs.") parser.add_argument("--prs", nargs="+", required=False, type=str, help="PR numbers to find merge commits for") args = parser.parse_args() if args.prs is None: args.prs = "https://github.com/huggingface/transformers/pull/33753 https://github.com/huggingface/transformers/pull/33861 https://github.com/huggingface/transformers/pull/33906 https://github.com/huggingface/transformers/pull/33761 
https://github.com/huggingface/transformers/pull/33586 https://github.com/huggingface/transformers/pull/33766 https://github.com/huggingface/transformers/pull/33958 https://github.com/huggingface/transformers/pull/33965".split() main(args.prs)
transformers/utils/patch_helper.py/0
{ "file_path": "transformers/utils/patch_helper.py", "repo_id": "transformers", "token_count": 1505 }
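To make the tag handling in `patch_helper.main` above concrete, here is a small standalone sketch of how invalid tags are skipped and how the `<major>.<minor>.0` lower bound is derived with `packaging.version`. The tag names are invented for illustration, and the exception handling assumes a recent `packaging` release that raises `InvalidVersion` for non-PEP 440 strings.

```python
# Standalone sketch of the tag handling in patch_helper.main(): invalid tags are
# skipped, the rest are sorted as versions, and the latest tag gives the
# "<major>.<minor>.0" base used when searching for merge commits.
from packaging import version

tag_names = ["v4.45.0", "v4.45.1", "v4.46.0", "localattn1"]  # invented examples
parsed = []
for name in tag_names:
    try:
        parsed.append(version.parse(name))
    except version.InvalidVersion:
        print(f"Skipping invalid version tag: {name}")

last_tag = sorted(parsed)[-1]
print(f"{last_tag.major}.{last_tag.minor}.0")  # -> 4.46.0
```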
import torch

from transformers import PreTrainedModel

from .custom_configuration import CustomConfig, NoSuperInitConfig


class CustomModel(PreTrainedModel):
    """Minimal test model: a single linear layer sized by `config.hidden_size`."""

    config_class = CustomConfig

    def __init__(self, config):
        super().__init__(config)
        self.linear = torch.nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, x):
        return self.linear(x)

    def _init_weights(self, module):
        # Weight initialization is intentionally a no-op for this test model.
        pass


class NoSuperInitModel(PreTrainedModel):
    """Test model paired with `NoSuperInitConfig`; it sizes the linear layer with `config.attribute`."""

    config_class = NoSuperInitConfig

    def __init__(self, config):
        super().__init__(config)
        self.linear = torch.nn.Linear(config.attribute, config.attribute)

    def forward(self, x):
        return self.linear(x)

    def _init_weights(self, module):
        # Weight initialization is intentionally a no-op for this test model.
        pass
transformers/utils/test_module/custom_modeling.py/0
{ "file_path": "transformers/utils/test_module/custom_modeling.py", "repo_id": "transformers", "token_count": 289 }
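A hypothetical usage sketch for the test module above. `CustomConfig` is defined in `custom_configuration.py`, which is not shown here, so passing `hidden_size=4` is an assumption about its constructor, and the imports assume the two modules are importable as part of the same package.

```python
# Hypothetical usage of CustomModel above. CustomConfig lives in
# custom_configuration.py (not shown); hidden_size=4 is an assumed argument,
# and the import path depends on how the test_module package is exposed.
import torch

from custom_configuration import CustomConfig
from custom_modeling import CustomModel

config = CustomConfig(hidden_size=4)
model = CustomModel(config)

hidden_states = torch.randn(2, 4)   # (batch_size, hidden_size)
print(model(hidden_states).shape)   # expected: torch.Size([2, 4])
```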
# Aligning Text-to-Image Diffusion Models with Reward Backpropagation

[![](https://img.shields.io/badge/All_models-AlignProp-blue)](https://huggingface.co/models?other=alignprop,trl)

## The why

If your reward function is differentiable, directly backpropagating gradients from the reward model to the diffusion model is significantly more sample- and compute-efficient (roughly 25x) than using a policy gradient algorithm such as DDPO. AlignProp performs full backpropagation through time, which allows updating the earlier denoising steps via reward backpropagation.

<div style="text-align: center"><img src="https://huggingface.co/datasets/trl-lib/documentation-images/resolve/main/reward_tuning.png"/></div>

## Getting started with `examples/scripts/alignprop.py`

The `alignprop.py` script is a working example of using the `AlignProp` trainer to finetune a Stable Diffusion model. This example explicitly configures a small subset of the overall parameters associated with the config object (`AlignPropConfig`).

**Note:** one A100 GPU is recommended to get this running. For lower-memory settings, consider setting `truncated_backprop_rand` to `False`. With default settings this will do truncated backpropagation with K=1.

Almost every configuration parameter has a default. There is only one command-line flag that the user must provide to get things up and running. The user is expected to have a [huggingface user access token](https://huggingface.co/docs/hub/security-tokens) that will be used to upload the model post-finetuning to the Hugging Face Hub. The following bash command gets things running:

```bash
python alignprop.py --hf_user_access_token <token>
```

To obtain the documentation of `alignprop.py`, run `python alignprop.py --help`.

The following are things to keep in mind (the code checks this for you as well) in general while configuring the trainer (beyond the use case of the example script):

- For the configurable randomized truncation range (`--alignprop_config.truncated_rand_backprop_minmax=(0,50)`), the first number should be equal to or greater than 0, while the second number should be equal to or less than the number of diffusion timesteps (`sample_num_steps`).
- For the configurable truncation backprop absolute step (`--alignprop_config.truncated_backprop_timestep=49`), the number should be less than the number of diffusion timesteps (`sample_num_steps`); it only matters when `truncated_backprop_rand` is set to `False`.

## Setting up the image logging hook function

Expect the function to be given a dictionary with keys

```python
['image', 'prompt', 'prompt_metadata', 'rewards']
```

and `image`, `prompt`, `prompt_metadata`, `rewards` are batched.
You are free to log however you want; the use of `wandb` or `tensorboard` is recommended.

### Key terms

- `rewards` : The reward/score is a numerical value associated with the generated image and is key to steering the RL process
- `prompt` : The prompt is the text that is used to generate the image
- `prompt_metadata` : The prompt metadata is the metadata associated with the prompt. 
A situation where this will not be empty is when the reward model comprises a [`FLAVA`](https://huggingface.co/docs/transformers/model_doc/flava) setup, where questions and ground-truth answers (linked to the generated image) are expected alongside the generated image (see here: https://github.com/kvablack/ddpo-pytorch/blob/main/ddpo_pytorch/rewards.py#L45).
- `image` : The image generated by the Stable Diffusion model

Example code for logging sampled images with `wandb` is given below.

```python
import numpy as np
from PIL import Image

# for logging these images to wandb
def image_outputs_hook(image_data, global_step, accelerate_logger):
    # For the sake of this example, we only care about the last batch
    # hence we extract the last element of the list
    result = {}
    images, prompts, rewards = [image_data['images'], image_data['prompts'], image_data['rewards']]
    for i, image in enumerate(images):
        pil = Image.fromarray(
            (image.cpu().numpy().transpose(1, 2, 0) * 255).astype(np.uint8)
        )
        pil = pil.resize((256, 256))
        result[f"{prompts[i]:.25} | {rewards[i]:.2f}"] = [pil]
    accelerate_logger.log_images(
        result,
        step=global_step,
    )
```

### Using the finetuned model

Assuming you are done with all the epochs and have pushed your model up to the hub, you can use the finetuned model as follows:

```python
from diffusers import StableDiffusionPipeline
pipeline = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
pipeline.to("cuda")

pipeline.load_lora_weights('mihirpd/alignprop-trl-aesthetics')

prompts = ["squirrel", "crab", "starfish", "whale", "sponge", "plankton"]
results = pipeline(prompts)

for prompt, image in zip(prompts, results.images):
    image.save(f"dump/{prompt}.png")
```

## Credits

This work is heavily influenced by the repo [here](https://github.com/mihirp1998/AlignProp/) and the associated paper [Aligning Text-to-Image Diffusion Models with Reward Backpropagation by Mihir Prabhudesai, Anirudh Goyal, Deepak Pathak, Katerina Fragkiadaki](https://huggingface.co/papers/2310.03739).
trl/docs/source/alignprop_trainer.md/0
{ "file_path": "trl/docs/source/alignprop_trainer.md", "repo_id": "trl", "token_count": 1570 }
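A hedged sketch of the truncation settings discussed in the AlignProp doc above: the field names mirror the `--alignprop_config.*` flags and the `sample_num_steps` parameter mentioned there, the values are illustrative only, and any field not mentioned is assumed to keep its library default.

```python
# Hedged sketch: the truncation-related settings discussed in the AlignProp doc.
# Field names follow the `--alignprop_config.*` flags shown above; the values are
# illustrative and other fields keep their defaults.
from trl import AlignPropConfig

config = AlignPropConfig(
    sample_num_steps=50,                     # number of diffusion timesteps
    truncated_backprop_rand=True,            # randomized truncated backprop (K sampled each step)
    truncated_rand_backprop_minmax=(0, 50),  # must satisfy 0 <= min, and max <= sample_num_steps
    truncated_backprop_timestep=49,          # only used when truncated_backprop_rand=False
)
```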
# PPO Trainer [![](https://img.shields.io/badge/All_models-PPO-blue)](https://huggingface.co/models?other=ppo,trl) TRL supports training LLMs with [Proximal Policy Optimization (PPO)](https://huggingface.co/papers/1707.06347). References: - [Fine-Tuning Language Models from Human Preferences](https://github.com/openai/lm-human-preferences) - [Learning to Summarize from Human Feedback](https://github.com/openai/summarize-from-feedback) - [The N Implementation Details of RLHF with PPO](https://huggingface.co/blog/the_n_implementation_details_of_rlhf_with_ppo) - [The N+ Implementation Details of RLHF with PPO: A Case Study on TL;DR Summarization](https://huggingface.co/papers/2403.17031) ## Get started To just run a PPO script to make sure the trainer can run, you can run the following command to train a PPO model with a dummy reward model. ```bash python examples/scripts/ppo/ppo.py \ --dataset_name trl-internal-testing/descriptiveness-sentiment-trl-style \ --dataset_train_split descriptiveness \ --learning_rate 3e-6 \ --num_ppo_epochs 1 \ --num_mini_batches 1 \ --output_dir models/minimal/ppo \ --per_device_train_batch_size 64 \ --gradient_accumulation_steps 1 \ --total_episodes 10000 \ --model_name_or_path EleutherAI/pythia-1b-deduped \ --sft_model_path EleutherAI/pythia-1b-deduped \ --reward_model_path EleutherAI/pythia-1b-deduped \ --missing_eos_penalty 1.0 ``` ## Explanation of the logged metrics The logged metrics are as follows. Here is an example [tracked run at Weights and Biases](https://wandb.ai/huggingface/trl/runs/dd2o3g35) * `eps`: Tracks the number of episodes per second. * `objective/kl`: The mean Kullback-Leibler (KL) divergence between the current policy and reference policy. * `objective/entropy`: The mean entropy of the policy, indicating the randomness of the actions chosen by the policy. * `objective/non_score_reward`: The mean reward from non-score-related sources, basically `beta * kl.sum(1)`, where `beta` is the KL penalty coefficient and `kl` is the per-token KL divergence. * `objective/rlhf_reward`: The mean RLHF reward, which is `score - non_score_reward`. * `objective/scores`: The mean scores returned by the reward model / environment. * `policy/approxkl_avg`: The average approximate KL divergence between consecutive PPO policies. Note that this is not the same as `objective/kl`. * `policy/clipfrac_avg`: The average fraction of policy updates that are clipped, indicating how often the policy updates are constrained to prevent large changes. * `loss/policy_avg`: The average policy loss, indicating how well the policy is performing. * `loss/value_avg`: The average value loss, indicating the difference between the predicted value and the actual reward. * `val/clipfrac_avg`: The average fraction of value function updates that are clipped, similar to policy/clipfrac_avg but for the value function. * `policy/entropy_avg`: The average entropy of the policy during training, indicating how diverse the policy's actions are. * `val/ratio`: The mean ratio of the current policy probability to the old policy probability, providing a measure of how much the policy has changed. * `val/ratio_var`: The variance of the `val/ratio`, indicating the variability in policy changes. * `val/num_eos_tokens`: The number of end-of-sequence (EOS) tokens generated, which can indicate the number of complete responses. * `lr`: lr: The current learning rate used by the optimizer. * `episode`: episode: The current global step or episode count in the training process. 
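To make the relationship between the `objective/*` metrics above concrete, here is a small numeric sketch that follows the formulas given in the list (`non_score_reward ≈ beta * kl.sum(1)` and `rlhf_reward = score - non_score_reward`). The tensors and the per-token KL estimate are simplified placeholders, not the trainer's actual implementation.

```python
# Simplified numeric sketch of the objective/* metrics described above.
# Shapes and the per-token KL estimate are placeholders, not TRL's exact code.
import torch

logprobs = torch.tensor([[-1.2, -0.8, -2.0]])      # current policy, per token
ref_logprobs = torch.tensor([[-1.0, -0.9, -1.5]])  # reference policy, per token
scores = torch.tensor([0.7])                        # reward model score per sequence
beta = 0.05                                         # KL penalty coefficient

kl = logprobs - ref_logprobs              # per-token KL estimate -> objective/kl (mean)
non_score_reward = beta * kl.sum(1)       # -> objective/non_score_reward
rlhf_reward = scores - non_score_reward   # -> objective/rlhf_reward
print(kl.sum(1).item(), non_score_reward.item(), rlhf_reward.item())
```
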
## Cookbook * Debugging TIP: `objective/rlhf_reward`: this is the ultimate objective of the RLHF training. If training works as intended, this metric should keep going up. * Debugging TIP: `val/ratio`: this number should float around 1.0, and it gets clipped by `--cliprange 0.2` with PPO's surrogate loss. So if this `ratio` is too high like 2.0 or 1000.0 or too small like 0.1, it means the updates between consecutive policies are too drastic. You should try understand why this is happening and try to fix it. * Memory TIP: If you are running out of memory, you can try to reduce the `--per_device_train_batch_size` or increase the `--gradient_accumulation_steps` to reduce the memory footprint. * Memory TIP: If you have multiple GPUs, you can also run training with DeepSpeed stage 3 to reduce the memory footprint `accelerate launch --config_file examples/accelerate_configs/deepspeed_zero3.yaml`. * Usage TIP: We recommend to use the "EOS trick" via `--missing_eos_penalty`, which subtracts a static scalar penalty from the score of completions that do not end with an EOS token. This can help the model learn to generate more coherent completions. ## What is my model doing exactly? To help you understand what your model is doing, we periodically log some sample completions from the model. Here is an example of a completion. In an example [tracked run at Weights and Biases](https://wandb.ai/huggingface/trl/runs/dd2o3g35), it looks like the following, allowing you to see the model's response at different stages of training. By default we generate `--num_sample_generations 10` during training, but you can customize the number of generations. ![](https://huggingface.co/datasets/trl-lib/documentation-images/resolve/main/ppov2_completions.gif) In the logs the sampled generations look like ``` ┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━┓ ┃ query ┃ model response ┃ score ┃ ┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━┩ │ SUBREDDIT: r/AskReddit │ I'm in love with a friend, and │ 3.921875 │ │ │ I don't know how to get rid of │ │ │ TITLE: How do you get someone │ those feelings. I'm │ │ │ out of your head? │ desperate.<|endoftext|>[PAD][P… │ │ │ │ │ │ │ POST: Hi, │ │ │ │ I'm 22, and I have been with my │ │ │ │ girlfriend for 5 years now. We │ │ │ │ recently moved together. We've │ │ │ │ always loved each other │ │ │ │ intensely. │ │ │ │ │ │ │ │ Problem, I recently started to │ │ │ │ have feelings for an other │ │ │ │ person (a friend). This person │ │ │ │ has had a boyfriend for now 3 │ │ │ │ years, and has absolutely no │ │ │ │ ideas. Those feelings were so │ │ │ │ strong, it was hard to hide │ │ │ │ them. After 2 months of me │ │ │ │ being distant and really sad, │ │ │ │ my girlfriend forced me to say │ │ │ │ what was bothering me. I'm not │ │ │ │ a good liar, and now she knows. │ │ │ │ │ │ │ │ We decided to give us a week │ │ │ │ alone, I went to my parents. │ │ │ │ │ │ │ │ Now, I'm completely lost. I │ │ │ │ keep on thinking about this │ │ │ │ person, and I hate that. I │ │ │ │ would like for those feelings │ │ │ │ to go away, to leave me alone. │ │ │ │ But I can't. │ │ │ │ │ │ │ │ What do I do? It's been 3 │ │ │ │ months now, and I'm just │ │ │ │ desperate. │ │ │ │ │ │ │ │ TL;DR: │ │ │ ├─────────────────────────────────┼─────────────────────────────────┼──────────┤ │ SUBREDDIT: r/pettyrevenge │ My mom woke me up with a loud │ 6.84375 │ │ │ TV. 
I blasted Gangnam Style on │ │ │ TITLE: So, my mom woke me up │ repeat, with the bass cranked │ │ │ with a loud TV. │ up as high as it could │ │ │ │ go.<|endoftext|>[PAD][PAD][PAD… │ │ │ POST: She was in her living │ │ │ │ room, watching TV. This was at │ │ │ │ about 8:30 in the morning, and │ │ │ │ she was exercising. She turned │ │ │ │ the TV up extra loud to hear it │ │ │ │ over her excercycle, and woke │ │ │ │ me up. I went in there asking │ │ │ │ for her to turn it down. She │ │ │ │ said she didn't have to; I │ │ │ │ explained that I always used │ │ │ │ headphones so she didn't have │ │ │ │ to deal with my noise and that │ │ │ │ she should give me a little │ │ │ │ more respect, given that I paid │ │ │ │ rent at the time. │ │ │ │ │ │ │ │ She disagreed. I went back to │ │ │ │ my room, rather pissed off at │ │ │ │ the lack of equality. I had no │ │ │ │ lock on my door; but I had a │ │ │ │ dresser right next to it, so I │ │ │ │ pulled one of the drawers out │ │ │ │ enough so that it caused the │ │ │ │ door to not be openable. Then, │ │ │ │ I turned my speakers up really │ │ │ │ loud and blasted Gangnam Style │ │ │ │ on repeat, with the bass │ │ │ │ cranked up as high as it could │ │ │ │ go. │ │ │ │ │ │ │ │ If you hate Gangnam Style for │ │ │ │ being overplayed, you will see │ │ │ │ why I chose that particular │ │ │ │ song. I personally don't mind │ │ │ │ it. But here's the thing about │ │ │ │ my bass; it vibrates the walls, │ │ │ │ making one hell of a lot of │ │ │ │ noise. Needless to say, my mom │ │ │ │ was not pleased and shut off │ │ │ │ the internet. But it was oh so │ │ │ │ worth it. │ │ │ │ │ │ │ │ TL;DR: │ │ │ └─────────────────────────────────┴─────────────────────────────────┴──────────┘ ``` ## Implementation details This PPO implementation is based on the [The N+ Implementation Details of RLHF with PPO: A Case Study on TL;DR Summarization](https://huggingface.co/papers/2403.17031). ## Benchmark experiments To validate the PPO implementation works, we ran experiment on the 1B model. Here are the command we used to run the experiment. We take the SFT / RM models directly from [The N+ Implementation Details of RLHF with PPO: A Case Study on TL;DR Summarization](https://huggingface.co/papers/2403.17031). ``` accelerate launch --config_file examples/accelerate_configs/deepspeed_zero2.yaml \ examples/scripts/ppo/ppo_tldr.py \ --output_dir models/minimal/ppo_tldr \ --learning_rate 3e-6 \ --per_device_train_batch_size 16 \ --gradient_accumulation_steps 4 \ --total_episodes 1000000 \ --model_name_or_path EleutherAI/pythia-1b-deduped \ --sft_model_path cleanrl/EleutherAI_pythia-1b-deduped__sft__tldr \ --reward_model_path cleanrl/EleutherAI_pythia-1b-deduped__reward__tldr \ --local_rollout_forward_batch_size 16 \ --missing_eos_penalty 1.0 \ --stop_token eos ``` Checkpoints and experiment tracking are available at: - [🤗 Model checkpoint](https://huggingface.co/vwxyzjn/ppo_tldr) - [🐝 Tracked experiment](https://wandb.ai/huggingface/trl/runs/dd2o3g35) To evaluate, we use [vLLM](https://github.com/vllm-project/vllm) to load the checkpoints and GPT-4o mini as a judge model to evaluate the generated TL;DR against the reference TL;DR. For more information on how to use judges, see [Judges](judges). 
```bash
$ python examples/scripts/evals/judge_tldr.py --model_name_or_path cleanrl/EleutherAI_pythia-1b-deduped__sft__tldr --judge_model gpt-4o-mini --num_examples 1000
Model win rate: 33.00%
$ python examples/scripts/evals/judge_tldr.py --model_name_or_path vwxyzjn/ppo_tldr --judge_model gpt-4o-mini --num_examples 1000
Model win rate: 64.70%
```

The PPO checkpoint achieves a 64.7% win rate against the reference summaries, compared with the 33.0% win rate of the SFT checkpoint. This is a good sign that the PPO training is working as intended.

Metrics:

![](https://huggingface.co/datasets/trl-lib/documentation-images/resolve/main/ppov2.png)

```bash
# pip install openrlbenchmark==0.2.1a5
# see https://github.com/openrlbenchmark/openrlbenchmark#get-started for documentation
# to use it, change `?we=huggingface&wpn=trl` to your own project and `?tag=pr-1540` to your own tag
python -m openrlbenchmark.rlops_multi_metrics \
    --filters '?we=huggingface&wpn=trl&xaxis=train/episode&ceik=output_dir&cen=sft_model_path&metrics=train/objective/rlhf_reward&metrics=train/objective/scores&metrics=train/objective/kl&metrics=train/objective/non_score_reward&metrics=train/objective/entropy&metrics=train/policy/approxkl_avg&metrics=train/policy/clipfrac_avg&metrics=train/loss/policy_avg&metrics=train/loss/value_avg&metrics=train/val/clipfrac_avg&metrics=train/policy/entropy_avg&metrics=train/val/ratio&metrics=train/val/ratio_var&metrics=train/val/num_eos_tokens&metrics=train/lr&metrics=train/eps' \
    "cleanrl/EleutherAI_pythia-1b-deduped__sft__tldr?tag=pr-1540" \
    --env-ids models/minimal/ppo_tldr \
    --pc.ncols 4 \
    --pc.ncols-legend 1 \
    --pc.xlabel "Episode" \
    --output-filename benchmark/trl/pr-1540/ppo \
    --scan-history
```

## PPOTrainer

[[autodoc]] PPOTrainer

## PPOConfig

[[autodoc]] PPOConfig
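As a complement to the "EOS trick" tip in the Cookbook section above, here is a minimal sketch of the idea. It is illustrative only, not the exact TRL implementation; the function and variable names are made up for the example, and a simple containment check is used in place of the trainer's internal bookkeeping:

```python
import torch

def apply_missing_eos_penalty(
    scores: torch.Tensor,           # (batch,) reward-model scores
    completion_ids: torch.Tensor,   # (batch, seq_len) generated token ids
    eos_token_id: int,
    missing_eos_penalty: float = 1.0,
) -> torch.Tensor:
    # Subtract a static scalar penalty from the score of completions that contain
    # no EOS token, nudging the policy toward producing complete responses.
    contains_eos = (completion_ids == eos_token_id).any(dim=-1)
    penalized_scores = scores.clone()
    penalized_scores[~contains_eos] -= missing_eos_penalty
    return penalized_scores
```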
trl/docs/source/ppo_trainer.md/0
{ "file_path": "trl/docs/source/ppo_trainer.md", "repo_id": "trl", "token_count": 8409 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass, field from typing import Optional from datasets import load_dataset from huggingface_hub import ModelCard from transformers import HfArgumentParser @dataclass class ScriptArguments: r""" Arguments for the script. Args: model_name (`str`, *optional*, defaults to `"gpt-3.5-turbo"`): Language model to target. Possible values are: aspect (`str`, *optional*, defaults to `"helpfulness"`): Aspect to target. push_to_hub (`bool`, *optional*, defaults to `False`): Whether to push the dataset to the Hugging Face Hub. repo_id (`str`, *optional*, defaults to `"trl-lib/ultrafeedback-gpt-3.5-turbo-helpfulness"`): Hugging Face repository ID to push the dataset to. dataset_num_proc (`int` or `None`, *optional*, defaults to `None`): Number of workers to use for dataset processing. """ model_name: str = field( default="gpt-3.5-turbo", metadata={ "help": "Language model to target.", "choices": [ "alpaca-7b", "bard", "falcon-40b-instruct", "gpt-3.5-turbo", "gpt-4", "llama-2-13b-chat", "llama-2-70b-chat", "llama-2-7b-chat", "mpt-30b-chat", "pythia-12b", "starchat", "ultralm-13b", "ultralm-65b", "vicuna-33b", "wizardlm-13b", "wizardlm-70b", "wizardlm-7b", ], }, ) aspect: str = field( default="helpfulness", metadata={ "help": "Aspect to target. Possible values are: 'helpfulness' (default), 'honesty', " "'instruction-following', 'truthfulness'.", "choices": ["helpfulness", "honesty", "instruction-following", "truthfulness"], }, ) push_to_hub: bool = field( default=False, metadata={"help": "Whether to push the dataset to the Hugging Face Hub."}, ) repo_id: str = field( default="trl-lib/ultrafeedback-gpt-3.5-turbo-helpfulness", metadata={"help": "Hugging Face repository ID to push the dataset to."}, ) dataset_num_proc: Optional[int] = field( default=None, metadata={"help": "Number of workers to use for dataset processing."}, ) def to_unpaired_preference(example, model_name, aspect): prompt = [{"role": "user", "content": example["instruction"]}] model_index = example["models"].index(model_name) response_content = example["completions"][model_index]["response"] completion = [{"role": "assistant", "content": response_content}] score = int(example["completions"][model_index]["annotations"][aspect]["Rating"]) label = score >= 5 return {"prompt": prompt, "completion": completion, "label": label} model_card = ModelCard(""" --- tags: [trl] --- # UltraFeedback GPT-3.5-Turbo Helpfulness Dataset ## Summary The UltraFeedback GPT-3.5-Turbo Helpfulness dataset contains processed user-assistant interactions filtered for helpfulness, derived from the [openbmb/UltraFeedback](https://huggingface.co/datasets/openbmb/UltraFeedback) dataset. It is designed for fine-tuning and evaluating models in alignment tasks. 
## Data Structure

- **Format**: [Conversational](https://huggingface.co/docs/trl/main/dataset_formats#conversational)
- **Type**: [Unpaired preference](https://huggingface.co/docs/trl/main/dataset_formats#unpaired-preference)

Columns:
- `"prompt"`: The input question or instruction provided to the model.
- `"completion"`: The model's response to the prompt.
- `"label"`: A binary value indicating whether the response is sufficiently helpful.

## Generation script

The script used to generate this dataset can be found [here](https://github.com/huggingface/trl/blob/main/examples/datasets/ultrafeedback.py).
""")


if __name__ == "__main__":
    parser = HfArgumentParser(ScriptArguments)
    script_args = parser.parse_args_into_dataclasses()[0]

    dataset = load_dataset("openbmb/UltraFeedback", split="train")
    dataset = dataset.filter(
        lambda example: script_args.model_name in example["models"],
        batched=False,
        num_proc=script_args.dataset_num_proc,
    )
    dataset = dataset.map(
        to_unpaired_preference,
        remove_columns=["source", "instruction", "models", "completions", "correct_answers", "incorrect_answers"],
        fn_kwargs={"model_name": script_args.model_name, "aspect": script_args.aspect},
        num_proc=script_args.dataset_num_proc,
    )
    dataset = dataset.train_test_split(test_size=0.05, seed=42)

    if script_args.push_to_hub:
        dataset.push_to_hub(script_args.repo_id)
        model_card.push_to_hub(script_args.repo_id, repo_type="dataset")
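# Example invocation (hypothetical: the flag names come from `ScriptArguments` above and the
# output repo id is a placeholder you would replace with your own namespace):
#
#   python examples/datasets/ultrafeedback.py \
#       --model_name gpt-4 \
#       --aspect honesty \
#       --push_to_hub \
#       --repo_id <your-username>/ultrafeedback-gpt-4-honesty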
trl/examples/datasets/ultrafeedback.py/0
{ "file_path": "trl/examples/datasets/ultrafeedback.py", "repo_id": "trl", "token_count": 2242 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import re from dataclasses import dataclass, field from typing import Optional import numpy as np import torch from datasets import load_dataset from peft import LoraConfig from transformers import AutoTokenizer, HfArgumentParser, load_tool from trl import AutoModelForCausalLMWithValueHead, PPOConfig, PPOTrainer, TextEnvironment os.environ["HF_ALLOW_CODE_EVAL"] = "1" os.environ["TOKENIZERS_PARALLELISM"] = "false" @dataclass class ScriptArguments: model_name: Optional[str] = field(default="bigcode/starcoderbase", metadata={"help": "the model name"}) learning_rate: Optional[float] = field(default=1e-5, metadata={"help": "the learning rate"}) mini_batch_size: Optional[int] = field(default=1, metadata={"help": "the PPO minibatch size"}) batch_size: Optional[int] = field(default=32, metadata={"help": "the batch size"}) gradient_accumulation_steps: Optional[int] = field( default=16, metadata={"help": "the number of gradient accumulation steps"} ) max_new_tokens: Optional[int] = field(default=256, metadata={"help": "max number of generated tokens per turn"}) ppo_epochs: Optional[int] = field(default=1, metadata={"help": "max number of ppo epochs"}) n_epochs: Optional[int] = field(default=32, metadata={"help": "max number of ppo epochs"}) parser = HfArgumentParser(ScriptArguments) script_args = parser.parse_args_into_dataclasses()[0] def exact_match_reward(responses, answers=None): """Reward if generated response contains correct answer.""" rewards = [] pattern = r"Result\s*=\s*(-?\d+(?:\.\d+)?)\s*<submit>" # generated by chatGPT for response, answer in zip(responses, answers): reward = 0.0 try: predicted_number = None match_pattern = re.findall(pattern, response) if match_pattern: predicted_number = float(match_pattern[0]) if predicted_number is not None: if np.abs(predicted_number - float(answer)) < 0.1: reward += 1.0 except Exception: pass rewards.append(torch.tensor(reward)) return rewards def evaluate(test_dataloader, text_env, ppo_trainer): test_rewards = [] for test_batch in test_dataloader: _, _, _, rewards, _ = text_env.run(test_batch["query"], answers=test_batch["answer"]) test_rewards.extend(rewards) test_rewards = ppo_trainer.accelerator.gather_for_metrics( torch.stack(test_rewards).to(ppo_trainer.accelerator.device) ) return test_rewards.mean() lora_config = LoraConfig( r=16, lora_alpha=32, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", target_modules=["c_proj", "c_attn", "q_attn"], ) # set up models model = AutoModelForCausalLMWithValueHead.from_pretrained( script_args.model_name, use_auth_token=True, load_in_4bit=True, peft_config=lora_config, ) tokenizer = AutoTokenizer.from_pretrained(script_args.model_name, use_auth_token=True) tokenizer.pad_token = tokenizer.eos_token ds = load_dataset("openai/gsm8k", "main", split="train") ds = ds.rename_columns({"question": "query"}) ds = ds.map(lambda x: {"answer": x["answer"].split("#### ")[1]}) ds = ds.select(range(1, len(ds))) # 
skip the first sample which is used in prompt ds_test = load_dataset("openai/gsm8k", "main", split="test") ds_test = ds_test.rename_columns({"question": "query"}) ds_test = ds_test.map(lambda x: {"answer": x["answer"].split("#### ")[1]}) test_dataloader = torch.utils.data.DataLoader(ds_test, batch_size=script_args.batch_size) # prompt prompt = """\ Example of using a Python API to solve math questions. Q: Olivia has $23. She bought five bagels for $3 each. How much money does she have left? <request><PythonInterpreter> def solution(): money_initial = 23 bagels = 5 bagel_cost = 3 money_spent = bagels * bagel_cost money_left = money_initial - money_spent result = money_left return result print(solution()) <call>72<response> Result = 72 <submit> Q: """ generation_kwargs = { "min_length": -1, "top_k": 0.0, "top_p": 1.0, "do_sample": True, "pad_token_id": tokenizer.eos_token_id, "eos_token_id": -1, "max_new_tokens": script_args.max_new_tokens, } # trainer ppo_config = PPOConfig( batch_size=script_args.batch_size, learning_rate=script_args.learning_rate, mini_batch_size=script_args.mini_batch_size, ppo_epochs=script_args.ppo_epochs, gradient_accumulation_steps=script_args.gradient_accumulation_steps, log_with="wandb", tracker_project_name="trl-gsm8k", remove_unused_columns=False, optimize_cuda_cache=True, ) ppo_trainer = PPOTrainer(args=ppo_config, model=model, tokenizer=tokenizer, dataset=ds) test_dataloader = ppo_trainer.accelerator.prepare(test_dataloader) # text env text_env = TextEnvironment( model, tokenizer, [load_tool("lvwerra/python-interpreter")], exact_match_reward, prompt, max_turns=2, generation_kwargs=generation_kwargs, ) # main training loop for epoch in range(script_args.n_epochs): for step, batch in enumerate(ppo_trainer.dataloader): if (step == 0) and (epoch % 4 == 0): # evaluate every 4 epochs reward_mean_test = evaluate(test_dataloader, text_env, ppo_trainer) else: reward_mean_test = None queries, responses, masks, rewards, histories = text_env.run(batch["query"], answers=batch["answer"]) train_stats = ppo_trainer.step(queries, responses, rewards, masks) # logging if reward_mean_test is not None: train_stats["env/reward_mean_test"] = reward_mean_test texts = { "query": batch["query"], "response": [tokenizer.decode(response) for response in responses], "answer": batch["answer"], } ppo_trainer.log_stats(train_stats, texts, rewards, columns_to_log=["query", "response", "answer"]) reward_mean_test = evaluate(test_dataloader, text_env, ppo_trainer) ppo_trainer.save_pretrained(f"model/{script_args.model_name}-gsm8k")
trl/examples/research_projects/tools/python_interpreter.py/0
{ "file_path": "trl/examples/research_projects/tools/python_interpreter.py", "repo_id": "trl", "token_count": 2631 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This script generates tiny models used in the TRL library for unit tests. It pushes them to the Hub under the # `trl-internal-testing` organization. # This script is meant to be run when adding new tiny model to the TRL library. from huggingface_hub import HfApi, ModelCard from transformers import ( AutoProcessor, AutoTokenizer, BartConfig, BartModel, BloomConfig, BloomForCausalLM, CLIPVisionConfig, CohereConfig, CohereForCausalLM, DbrxConfig, DbrxForCausalLM, FalconMambaConfig, FalconMambaForCausalLM, Gemma2Config, Gemma2ForCausalLM, GemmaConfig, GemmaForCausalLM, GPT2Config, GPT2LMHeadModel, GPTNeoXConfig, GPTNeoXForCausalLM, Idefics2Config, Idefics2ForConditionalGeneration, LlamaConfig, LlamaForCausalLM, LlamaForSequenceClassification, LlavaConfig, LlavaForConditionalGeneration, LlavaNextConfig, LlavaNextForConditionalGeneration, MistralConfig, MistralForCausalLM, OPTConfig, OPTForCausalLM, PaliGemmaConfig, PaliGemmaForConditionalGeneration, Phi3Config, Phi3ForCausalLM, Qwen2Config, Qwen2ForCausalLM, Qwen2ForSequenceClassification, SiglipVisionConfig, T5Config, T5ForConditionalGeneration, ) from transformers.models.idefics2.configuration_idefics2 import Idefics2VisionConfig ORGANIZATION = "trl-internal-testing" MODEL_CARD = """ --- library_name: transformers tags: [trl] --- # Tiny {model_class_name} This is a minimal model built for unit tests in the [TRL](https://github.com/huggingface/trl) library. 
""" api = HfApi() def push_to_hub(model, tokenizer, prefix=None, suffix=None): model_class_name = model.__class__.__name__ content = MODEL_CARD.format(model_class_name=model_class_name) model_card = ModelCard(content) if prefix is not None: model_class_name = f"{prefix}-{model_class_name}" repo_id = f"{ORGANIZATION}/{model_class_name}" if suffix is not None: repo_id += f"-{suffix}" if api.repo_exists(repo_id): print(f"Model {repo_id} already exists, skipping") else: model.push_to_hub(repo_id) tokenizer.push_to_hub(repo_id) model_card.push_to_hub(repo_id) # Decoder models for model_id, config_class, model_class, suffix in [ ("bigscience/bloomz-560m", BloomConfig, BloomForCausalLM, None), ("CohereForAI/aya-expanse-8b", CohereConfig, CohereForCausalLM, None), ("databricks/dbrx-instruct", DbrxConfig, DbrxForCausalLM, None), ("tiiuae/falcon-7b-instruct", FalconMambaConfig, FalconMambaForCausalLM, None), ("google/gemma-2-2b-it", Gemma2Config, Gemma2ForCausalLM, None), ("google/gemma-7b-it", GemmaConfig, GemmaForCausalLM, None), ("openai-community/gpt2", GPT2Config, GPT2LMHeadModel, None), ("EleutherAI/pythia-14m", GPTNeoXConfig, GPTNeoXForCausalLM, None), ("meta-llama/Meta-Llama-3-8B-Instruct", LlamaConfig, LlamaForCausalLM, "3"), ("meta-llama/Llama-3.1-8B-Instruct", LlamaConfig, LlamaForCausalLM, "3.1"), ("meta-llama/Llama-3.2-1B-Instruct", LlamaConfig, LlamaForCausalLM, "3.2"), ("mistralai/Mistral-7B-Instruct-v0.1", MistralConfig, MistralForCausalLM, "0.1"), ("mistralai/Mistral-7B-Instruct-v0.2", MistralConfig, MistralForCausalLM, "0.2"), ("facebook/opt-1.3b", OPTConfig, OPTForCausalLM, None), ("microsoft/Phi-3.5-mini-instruct", Phi3Config, Phi3ForCausalLM, None), ("Qwen/Qwen2.5-32B-Instruct", Qwen2Config, Qwen2ForCausalLM, "2.5"), ("Qwen/Qwen2.5-Coder-0.5B", Qwen2Config, Qwen2ForCausalLM, "2.5-Coder"), ]: tokenizer = AutoTokenizer.from_pretrained(model_id) config = config_class( vocab_size=tokenizer.vocab_size + len(tokenizer.added_tokens_encoder.keys()), hidden_size=8, num_attention_heads=4, num_key_value_heads=2, num_hidden_layers=2, intermediate_size=32, ) model = model_class(config) push_to_hub(model, tokenizer, "tiny", suffix) # A slightly bigger model, required for vLLM testing tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-32B-Instruct") config = Qwen2Config( vocab_size=tokenizer.vocab_size + len(tokenizer.added_tokens_encoder.keys()), hidden_size=128, # increase hidden size so that hidden_size // num_attention_heads = 32, required for vLLM num_attention_heads=4, num_key_value_heads=2, num_hidden_layers=2, intermediate_size=32, ) model = Qwen2ForCausalLM(config) push_to_hub(model, tokenizer, "small", "2.5") # Reward models for model_id, config_class, model_class, suffix in [ ("meta-llama/Llama-3.2-1B-Instruct", LlamaConfig, LlamaForSequenceClassification, "3.2"), ("Qwen/Qwen2.5-32B-Instruct", Qwen2Config, Qwen2ForSequenceClassification, "2.5"), ]: tokenizer = AutoTokenizer.from_pretrained(model_id) config = config_class( vocab_size=tokenizer.vocab_size + len(tokenizer.added_tokens_encoder.keys()), hidden_size=8, num_attention_heads=4, num_key_value_heads=2, num_hidden_layers=2, intermediate_size=32, num_labels=1, ) model = model_class(config) push_to_hub(model, tokenizer, "tiny", suffix) # Encoder-decoder models for model_id, config_class, model_class, suffix in [ ("google/flan-t5-small", T5Config, T5ForConditionalGeneration, None), ("facebook/bart-base", BartConfig, BartModel, None), ]: tokenizer = AutoTokenizer.from_pretrained(model_id) config = config_class( 
vocab_size=tokenizer.vocab_size + len(tokenizer.added_tokens_encoder.keys()), d_model=16, encoder_layers=2, decoder_layers=2, d_kv=2, d_ff=64, num_layers=6, num_heads=8, decoder_start_token_id=0, is_encoder_decoder=True, ) model = model_class(config) push_to_hub(model, tokenizer, "tiny", suffix) # Vision Language Models # fmt: off for model_id, config_class, text_config_class, vision_config_class, model_class in [ ("HuggingFaceM4/idefics2-8b", Idefics2Config, MistralConfig, Idefics2VisionConfig, Idefics2ForConditionalGeneration), ("llava-hf/llava-1.5-7b-hf", LlavaConfig, LlamaConfig, CLIPVisionConfig, LlavaForConditionalGeneration), ("llava-hf/llava-v1.6-mistral-7b-hf", LlavaNextConfig, MistralConfig, CLIPVisionConfig, LlavaNextForConditionalGeneration), ("google/paligemma-3b-pt-224", PaliGemmaConfig, GemmaConfig, SiglipVisionConfig, PaliGemmaForConditionalGeneration), ]: # fmt: on processor = AutoProcessor.from_pretrained(model_id) kwargs = {} if config_class == PaliGemmaConfig: kwargs["projection_dim"] = 8 vision_kwargs = {} if vision_config_class in [CLIPVisionConfig, SiglipVisionConfig]: vision_kwargs["projection_dim"] = 8 if vision_config_class == CLIPVisionConfig: vision_kwargs["image_size"] = 336 vision_kwargs["patch_size"] = 14 config = config_class( text_config=text_config_class( vocab_size=processor.tokenizer.vocab_size + len(processor.tokenizer.added_tokens_encoder), hidden_size=8, num_attention_heads=4, num_key_value_heads=2, num_hidden_layers=2, intermediate_size=32, ), vision_config=vision_config_class( hidden_size=8, num_attention_heads=4, num_hidden_layers=2, intermediate_size=32, **vision_kwargs, ), **kwargs, ) model = model_class(config) push_to_hub(model, processor, "tiny")
trl/scripts/generate_tiny_models.py/0
{ "file_path": "trl/scripts/generate_tiny_models.py", "repo_id": "trl", "token_count": 3444 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from dataclasses import dataclass from unittest.mock import mock_open, patch from trl import TrlParser @dataclass class MyDataclass: arg1: int arg2: str = "default" @dataclass class InvalidDataclass: config: str # This should raise an error in the TrlParser class TestTrlParser(unittest.TestCase): def test_init_without_config_field(self): """Test initialization without 'config' field in the dataclasses.""" parser = TrlParser(dataclass_types=[MyDataclass]) self.assertIsInstance(parser, TrlParser) def test_init_with_config_field(self): """Test initialization with a 'config' field in the dataclass (should raise ValueError).""" with self.assertRaises(ValueError) as context: TrlParser(dataclass_types=[InvalidDataclass]) self.assertTrue("has a field named 'config'" in str(context.exception)) @patch("builtins.open", mock_open(read_data="env:\n VAR1: value1\n VAR2: value2\narg1: 2")) @patch("yaml.safe_load") @patch("os.environ", new_callable=dict) # Mock os.environ as a dictionary def test_parse_args_and_config_with_valid_config(self, mock_environ, mock_yaml_load): """Test parse_args_and_config method with valid arguments and config.""" mock_yaml_load.return_value = {"env": {"VAR1": "value1", "VAR2": "value2"}, "arg1": 2} parser = TrlParser(dataclass_types=[MyDataclass]) args = ["--arg2", "value", "--config", "config.yaml"] # don't set arg1 to test default value # Simulate the config being loaded and environment variables being set result_args = parser.parse_args_and_config(args) # Set the environment variables using the mock mock_environ["VAR1"] = "value1" mock_environ["VAR2"] = "value2" # Ensure that the environment variables were set correctly self.assertEqual(mock_environ.get("VAR1"), "value1") self.assertEqual(mock_environ.get("VAR2"), "value2") # Check the parsed arguments self.assertEqual(len(result_args), 1) self.assertIsInstance(result_args[0], MyDataclass) self.assertEqual(result_args[0].arg1, 2) self.assertEqual(result_args[0].arg2, "value") @patch("builtins.open", mock_open(read_data="arg1: 2")) @patch("yaml.safe_load") def test_parse_args_and_arg_override_config(self, mock_yaml_load): """Test parse_args_and_config method and check that arguments override the config.""" mock_yaml_load.return_value = {"arg1": 2} # this arg is meant to be overridden parser = TrlParser(dataclass_types=[MyDataclass]) args = ["--arg1", "3", "--config", "config.yaml"] # override arg1 default with 3 # Simulate the config being loaded and arguments being passed result_args = parser.parse_args_and_config(args) # Check the parsed arguments self.assertEqual(len(result_args), 1) self.assertIsInstance(result_args[0], MyDataclass) self.assertEqual(result_args[0].arg1, 3) @patch("builtins.open", mock_open(read_data="env: not_a_dict")) @patch("yaml.safe_load") def test_parse_args_and_config_with_invalid_env(self, mock_yaml_load): """Test parse_args_and_config method when the 'env' field is not a dictionary.""" 
mock_yaml_load.return_value = {"env": "not_a_dict"} parser = TrlParser(dataclass_types=[MyDataclass]) args = ["--arg1", "2", "--arg2", "value", "--config", "config.yaml"] with self.assertRaises(ValueError) as context: parser.parse_args_and_config(args) self.assertEqual(str(context.exception), "`env` field should be a dict in the YAML file.") def test_parse_args_and_config_without_config(self): """Test parse_args_and_config without the `--config` argument.""" parser = TrlParser(dataclass_types=[MyDataclass]) args = ["--arg1", "2", "--arg2", "value"] # Simulate no config, just parse args normally result_args = parser.parse_args_and_config(args) # Check that the arguments are parsed as is self.assertEqual(len(result_args), 1) self.assertIsInstance(result_args[0], MyDataclass) self.assertEqual(result_args[0].arg1, 2) self.assertEqual(result_args[0].arg2, "value") def test_set_defaults_with_config(self): """Test set_defaults_with_config updates the defaults.""" parser = TrlParser(dataclass_types=[MyDataclass]) # Update defaults parser.set_defaults_with_config(arg1=42) # Ensure the default value is updated result_args = parser.parse_args_and_config([]) self.assertEqual(len(result_args), 1) self.assertIsInstance(result_args[0], MyDataclass) self.assertEqual(result_args[0].arg1, 42) def test_parse_args_and_config_with_remaining_strings(self): parser = TrlParser(dataclass_types=[MyDataclass]) args = ["--arg1", "2", "--arg2", "value", "remaining"] # Simulate no config, just parse args normally result_args = parser.parse_args_and_config(args, return_remaining_strings=True) # Check that the arguments are parsed as is self.assertEqual(len(result_args), 2) self.assertIsInstance(result_args[0], MyDataclass) self.assertEqual(result_args[0].arg1, 2) self.assertEqual(result_args[0].arg2, "value") self.assertEqual(result_args[1], ["remaining"]) @patch("builtins.open", mock_open(read_data="remaining_string_in_config: abc")) @patch("yaml.safe_load") def test_parse_args_and_config_with_remaining_strings_in_config_and_args(self, mock_yaml_load): mock_yaml_load.return_value = {"remaining_string_in_config": "abc"} parser = TrlParser(dataclass_types=[MyDataclass]) args = ["--arg1", "2", "--remaining_string_in_args", "def", "--config", "config.yaml"] # Simulate the config being loaded and arguments being passed result_args = parser.parse_args_and_config(args, return_remaining_strings=True) # Check that the arguments are parsed as is self.assertEqual(len(result_args), 2) self.assertIsInstance(result_args[0], MyDataclass) self.assertEqual(result_args[0].arg1, 2) self.assertEqual(result_args[1], ["--remaining_string_in_config", "abc", "--remaining_string_in_args", "def"])
trl/tests/test_cli_utils.py/0
{ "file_path": "trl/tests/test_cli_utils.py", "repo_id": "trl", "token_count": 2734 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import sys import tempfile import unittest import torch from parameterized import parameterized from transformers import AutoModel, AutoModelForCausalLM, AutoModelForSeq2SeqLM, GenerationConfig from trl import AutoModelForCausalLMWithValueHead, AutoModelForSeq2SeqLMWithValueHead, create_reference_model ALL_CAUSAL_LM_MODELS = [ "trl-internal-testing/tiny-BloomForCausalLM", "trl-internal-testing/tiny-CohereForCausalLM", "trl-internal-testing/tiny-DbrxForCausalLM", "trl-internal-testing/tiny-FalconMambaForCausalLM", "trl-internal-testing/tiny-Gemma2ForCausalLM", "trl-internal-testing/tiny-GemmaForCausalLM", "trl-internal-testing/tiny-GPT2LMHeadModel", "trl-internal-testing/tiny-GPTNeoXForCausalLM", "trl-internal-testing/tiny-LlamaForCausalLM-3.1", "trl-internal-testing/tiny-LlamaForCausalLM-3.2", "trl-internal-testing/tiny-LlamaForCausalLM-3", "trl-internal-testing/tiny-MistralForCausalLM-0.1", "trl-internal-testing/tiny-MistralForCausalLM-0.2", "trl-internal-testing/tiny-OPTForCausalLM", "trl-internal-testing/tiny-Phi3ForCausalLM", "trl-internal-testing/tiny-Qwen2ForCausalLM-2.5", ] ALL_SEQ2SEQ_MODELS = [ "trl-internal-testing/tiny-T5ForConditionalGeneration", "trl-internal-testing/tiny-BartModel", ] class BaseTester: class VHeadModelTester(unittest.TestCase): all_model_names = None trl_model_class = None transformers_model_class = None def test_value_head(self): r""" Test if the v-head is added to the model successfully """ for model_name in self.all_model_names: model = self.trl_model_class.from_pretrained(model_name) self.assertTrue(hasattr(model, "v_head")) def test_value_head_shape(self): r""" Test if the v-head has the correct shape """ for model_name in self.all_model_names: model = self.trl_model_class.from_pretrained(model_name) self.assertEqual(model.v_head.summary.weight.shape[0], 1) def test_value_head_init_random(self): r""" Test if the v-head has been randomly initialized. We can check that by making sure the bias is different than zeros by default. """ for model_name in self.all_model_names: model = self.trl_model_class.from_pretrained(model_name) self.assertFalse( torch.allclose(model.v_head.summary.bias, torch.zeros_like(model.v_head.summary.bias)) ) def test_value_head_not_str(self): r""" Test if the v-head is added to the model successfully, by passing a non `PretrainedModel` as an argument to `from_pretrained`. """ for model_name in self.all_model_names: pretrained_model = self.transformers_model_class.from_pretrained(model_name) model = self.trl_model_class.from_pretrained(pretrained_model) self.assertTrue(hasattr(model, "v_head")) @unittest.skipIf(sys.platform.startswith("win"), "Skipping on Windows") def test_from_save_trl(self): """ Test if the model can be saved and loaded from a directory and get the same weights Including the additional modules (e.g. 
v_head) """ for model_name in self.all_model_names: model = self.trl_model_class.from_pretrained(model_name) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir) model_from_save = self.trl_model_class.from_pretrained(tmp_dir) # Check if the weights are the same for key in model_from_save.state_dict(): self.assertTrue(torch.allclose(model_from_save.state_dict()[key], model.state_dict()[key])) @unittest.skipIf(sys.platform.startswith("win"), "Skipping on Windows") def test_from_save_trl_sharded(self): """ Test if the model can be saved and loaded from a directory and get the same weights - sharded case """ for model_name in self.all_model_names: model = self.trl_model_class.from_pretrained(model_name) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir) model_from_save = self.trl_model_class.from_pretrained(tmp_dir) # Check if the weights are the same for key in model_from_save.state_dict(): self.assertTrue(torch.allclose(model_from_save.state_dict()[key], model.state_dict()[key])) @unittest.skipIf(sys.platform.startswith("win"), "Skipping on Windows") def test_from_save_transformers_sharded(self): """ Test if the model can be saved and loaded using transformers and get the same weights - sharded case """ for model_name in self.all_model_names: transformers_model = self.trl_model_class.transformers_parent_class.from_pretrained(model_name) trl_model = self.trl_model_class.from_pretrained(model_name) with tempfile.TemporaryDirectory() as tmp_dir: trl_model.save_pretrained(tmp_dir, max_shard_size="1MB") transformers_model_from_save = self.trl_model_class.transformers_parent_class.from_pretrained( tmp_dir ) # Check if the weights are the same for key in transformers_model.state_dict(): self.assertTrue( torch.allclose( transformers_model_from_save.state_dict()[key], transformers_model.state_dict()[key] ) ) @unittest.skipIf(sys.platform.startswith("win"), "Skipping on Windows") def test_from_save_transformers(self): """ Test if the model can be saved and loaded using transformers and get the same weights. We override the test of the super class to check if the weights are the same. """ for model_name in self.all_model_names: transformers_model = self.trl_model_class.transformers_parent_class.from_pretrained(model_name) trl_model = self.trl_model_class.from_pretrained(model_name) with tempfile.TemporaryDirectory() as tmp_dir: trl_model.save_pretrained(tmp_dir) transformers_model_from_save = self.trl_model_class.transformers_parent_class.from_pretrained( tmp_dir ) # Check if the weights are the same for key in transformers_model.state_dict(): self.assertTrue( torch.allclose( transformers_model_from_save.state_dict()[key], transformers_model.state_dict()[key] ) ) # Check if the trl model has the same keys as the transformers model # except the v_head for key in trl_model.state_dict(): if "v_head" not in key: self.assertIn(key, transformers_model.state_dict()) # check if the weights are the same self.assertTrue( torch.allclose(trl_model.state_dict()[key], transformers_model.state_dict()[key]) ) # check if they have the same modules self.assertEqual( set(transformers_model_from_save.state_dict().keys()), set(transformers_model.state_dict().keys()), ) class CausalLMValueHeadModelTester(BaseTester.VHeadModelTester, unittest.TestCase): """ Testing suite for v-head models. 
""" all_model_names = ALL_CAUSAL_LM_MODELS trl_model_class = AutoModelForCausalLMWithValueHead transformers_model_class = AutoModelForCausalLM def tearDown(self): # free memory gc.collect() def test_inference(self): r""" Test if the model can be used for inference and outputs 3 values - logits, loss, and value states """ EXPECTED_OUTPUT_SIZE = 3 for model_name in self.all_model_names: model = self.trl_model_class.from_pretrained(model_name) input_ids = torch.tensor([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]]) outputs = model(input_ids) # Check if the outputs are of the right size - here # we always output 3 values - logits, loss, and value states self.assertEqual(len(outputs), EXPECTED_OUTPUT_SIZE) def test_dropout_config(self): r""" Test if we instantiate a model by adding `summary_drop_prob` to the config it will be added to the v_head """ for model_name in self.all_model_names: pretrained_model = self.transformers_model_class.from_pretrained(model_name) pretrained_model.config.summary_dropout_prob = 0.5 model = self.trl_model_class.from_pretrained(pretrained_model) # Check if v head of the model has the same dropout as the config self.assertEqual(model.v_head.dropout.p, pretrained_model.config.summary_dropout_prob) def test_dropout_kwargs(self): r""" Test if we instantiate a model by adding `summary_drop_prob` to the config it will be added to the v_head """ for model_name in self.all_model_names: v_head_kwargs = {"summary_dropout_prob": 0.5} model = self.trl_model_class.from_pretrained(model_name, **v_head_kwargs) # Check if v head of the model has the same dropout as the config self.assertEqual(model.v_head.dropout.p, 0.5) model = self.trl_model_class.from_pretrained(model_name, summary_dropout_prob=0.5) # Check if v head of the model has the same dropout as the config self.assertEqual(model.v_head.dropout.p, 0.5) @parameterized.expand(ALL_CAUSAL_LM_MODELS) def test_generate(self, model_name): r""" Test if `generate` works for every model """ generation_config = GenerationConfig(max_new_tokens=9) model = self.trl_model_class.from_pretrained(model_name) input_ids = torch.tensor([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]]) # Just check if the generation works _ = model.generate(input_ids, generation_config=generation_config) def test_transformers_bf16_kwargs(self): r""" Test if the transformers kwargs are correctly passed Here we check that loading a model in half precision works as expected, i.e. the weights of the `pretrained_model` attribute is loaded in half precision and you can run a dummy forward pass without any issue. 
""" for model_name in self.all_model_names: trl_model = self.trl_model_class.from_pretrained(model_name, torch_dtype=torch.bfloat16) lm_head_namings = ["lm_head", "embed_out", "output_layer"] self.assertTrue( any(hasattr(trl_model.pretrained_model, lm_head_naming) for lm_head_naming in lm_head_namings), "Can't test the model because it doesn't have any of the expected lm_head namings", ) for lm_head_naming in lm_head_namings: if hasattr(trl_model.pretrained_model, lm_head_naming): self.assertEqual(getattr(trl_model.pretrained_model, lm_head_naming).weight.dtype, torch.bfloat16) dummy_input = torch.LongTensor([[0, 1, 0, 1]]) # check dummy forward pass works in half precision _ = trl_model(dummy_input) @unittest.skip("This test needs to be run manually due to HF token issue.") def test_push_to_hub(self): for model_name in self.all_model_names: model = AutoModelForCausalLMWithValueHead.from_pretrained(model_name) if "sharded" in model_name: model.push_to_hub(model_name + "-ppo", use_auth_token=True, max_shard_size="1MB") else: model.push_to_hub(model_name + "-ppo", use_auth_token=True) model_from_pretrained = AutoModelForCausalLMWithValueHead.from_pretrained(model_name + "-ppo") # check all keys self.assertEqual(model.state_dict().keys(), model_from_pretrained.state_dict().keys()) for name, param in model.state_dict().items(): self.assertTrue( torch.allclose(param, model_from_pretrained.state_dict()[name]), f"Parameter {name} is not the same after push_to_hub and from_pretrained", ) class Seq2SeqValueHeadModelTester(BaseTester.VHeadModelTester, unittest.TestCase): """ Testing suite for v-head models. """ all_model_names = ALL_SEQ2SEQ_MODELS trl_model_class = AutoModelForSeq2SeqLMWithValueHead transformers_model_class = AutoModelForSeq2SeqLM def tearDown(self): # free memory gc.collect() def test_inference(self): r""" Test if the model can be used for inference and outputs 3 values - logits, loss, and value states """ EXPECTED_OUTPUT_SIZE = 3 for model_name in self.all_model_names: model = self.trl_model_class.from_pretrained(model_name) input_ids = torch.tensor([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]]) decoder_input_ids = torch.tensor([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]]) outputs = model(input_ids, decoder_input_ids=decoder_input_ids) # Check if the outputs are of the right size - here # we always output 3 values - logits, loss, and value states self.assertEqual(len(outputs), EXPECTED_OUTPUT_SIZE) def test_dropout_config(self): r""" Test if we instantiate a model by adding `summary_drop_prob` to the config it will be added to the v_head """ for model_name in self.all_model_names: pretrained_model = self.transformers_model_class.from_pretrained(model_name) pretrained_model.config.summary_dropout_prob = 0.5 model = self.trl_model_class.from_pretrained(pretrained_model) # Check if v head of the model has the same dropout as the config self.assertEqual(model.v_head.dropout.p, pretrained_model.config.summary_dropout_prob) def test_dropout_kwargs(self): r""" Test if we instantiate a model by adding `summary_drop_prob` to the config it will be added to the v_head """ for model_name in self.all_model_names: v_head_kwargs = {"summary_dropout_prob": 0.5} model = self.trl_model_class.from_pretrained(model_name, **v_head_kwargs) # Check if v head of the model has the same dropout as the config self.assertEqual(model.v_head.dropout.p, 0.5) model = self.trl_model_class.from_pretrained(model_name, summary_dropout_prob=0.5) # Check if v head of the model has the same dropout as the config 
self.assertEqual(model.v_head.dropout.p, 0.5) @parameterized.expand(ALL_SEQ2SEQ_MODELS) def test_generate(self, model_name): r""" Test if `generate` works for every model """ generation_config = GenerationConfig(max_new_tokens=9) model = self.trl_model_class.from_pretrained(model_name) input_ids = torch.tensor([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]]) decoder_input_ids = torch.tensor([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]]) # Just check if the generation works _ = model.generate(input_ids, decoder_input_ids=decoder_input_ids, generation_config=generation_config) def test_raise_error_not_causallm(self): # Test with a model without a LM head model_id = "trl-internal-testing/tiny-T5ForConditionalGeneration" # This should raise a ValueError with self.assertRaises(ValueError): pretrained_model = AutoModel.from_pretrained(model_id) _ = self.trl_model_class.from_pretrained(pretrained_model) @unittest.skip("This test needs to be run manually due to HF token issue.") def test_push_to_hub(self): for model_name in self.all_model_names: model = self.trl_model_class.from_pretrained(model_name) if "sharded" in model_name: model.push_to_hub(model_name + "-ppo", use_auth_token=True, max_shard_size="1MB") else: model.push_to_hub(model_name + "-ppo", use_auth_token=True) model_from_pretrained = self.trl_model_class.from_pretrained(model_name + "-ppo") # check all keys self.assertEqual(model.state_dict().keys(), model_from_pretrained.state_dict().keys()) for name, param in model.state_dict().items(): self.assertTrue( torch.allclose(param, model_from_pretrained.state_dict()[name]), f"Parameter {name} is not the same after push_to_hub and from_pretrained", ) def test_transformers_bf16_kwargs(self): r""" Test if the transformers kwargs are correctly passed Here we check that loading a model in half precision works as expected, i.e. the weights of the `pretrained_model` attribute is loaded in half precision and you can run a dummy forward pass without any issue. 
""" for model_name in self.all_model_names: trl_model = self.trl_model_class.from_pretrained(model_name, torch_dtype=torch.bfloat16) lm_head_namings = self.trl_model_class.lm_head_namings self.assertTrue( any(hasattr(trl_model.pretrained_model, lm_head_naming) for lm_head_naming in lm_head_namings) ) for lm_head_naming in lm_head_namings: if hasattr(trl_model.pretrained_model, lm_head_naming): self.assertTrue(getattr(trl_model.pretrained_model, lm_head_naming).weight.dtype == torch.bfloat16) dummy_input = torch.LongTensor([[0, 1, 0, 1]]) # check dummy forward pass works in half precision _ = trl_model(input_ids=dummy_input, decoder_input_ids=dummy_input) class ReferenceModelTest(unittest.TestCase): def setUp(self): self.model = AutoModelForCausalLMWithValueHead.from_pretrained("trl-internal-testing/tiny-GPT2LMHeadModel") self.test_input = torch.tensor([[0, 1, 2, 3]]) self.optimizer = torch.optim.AdamW(self.model.parameters(), lr=1) self.layer_format = "pretrained_model.transformer.h.{layer}.attn.c_attn.weight" def test_independent_reference(self): layer_0 = self.layer_format.format(layer=0) layer_1 = self.layer_format.format(layer=1) ref_model = create_reference_model(self.model) first_layer_before = self.model.get_parameter(layer_0).data.clone() last_layer_before = self.model.get_parameter(layer_1).data.clone() # the model only has 2 layers first_ref_layer_before = ref_model.get_parameter(layer_0).data.clone() last_ref_layer_before = ref_model.get_parameter(layer_1).data.clone() output = self.model(input_ids=self.test_input, labels=self.test_input) output[1].backward() self.optimizer.step() first_layer_after = self.model.get_parameter(layer_0).data.clone() last_layer_after = self.model.get_parameter(layer_1).data.clone() first_ref_layer_after = ref_model.get_parameter(layer_0).data.clone() last_ref_layer_after = ref_model.get_parameter(layer_1).data.clone() # before optimization ref and model are identical self.assertTrue((first_layer_before == first_ref_layer_before).all()) self.assertTrue((last_layer_before == last_ref_layer_before).all()) # ref model stays identical after optimization self.assertTrue((first_ref_layer_before == first_ref_layer_after).all()) self.assertTrue((last_ref_layer_before == last_ref_layer_after).all()) # optimized model changes self.assertFalse((first_layer_before == first_layer_after).all()) self.assertFalse((last_layer_before == last_layer_after).all()) def test_shared_layers(self): layer_0 = self.layer_format.format(layer=0) layer_1 = self.layer_format.format(layer=1) ref_model = create_reference_model(self.model, num_shared_layers=1) first_layer_before = self.model.get_parameter(layer_0).data.clone() second_layer_before = self.model.get_parameter(layer_1).data.clone() first_ref_layer_before = ref_model.get_parameter(layer_0).data.clone() second_ref_layer_before = ref_model.get_parameter(layer_1).data.clone() output = self.model(input_ids=self.test_input, labels=self.test_input) output[1].backward() self.optimizer.step() first_layer_after = self.model.get_parameter(layer_0).data.clone() second_layer_after = self.model.get_parameter(layer_1).data.clone() first_ref_layer_after = ref_model.get_parameter(layer_0).data.clone() second_ref_layer_after = ref_model.get_parameter(layer_1).data.clone() # before optimization ref and model are identical self.assertTrue((first_layer_before == first_ref_layer_before).all()) self.assertTrue((second_layer_before == second_ref_layer_before).all()) # ref model stays identical after optimization 
self.assertTrue((first_ref_layer_before == first_ref_layer_after).all()) self.assertTrue((second_ref_layer_before == second_ref_layer_after).all()) # first layer of optimized model stays the same self.assertTrue((first_layer_before == first_layer_after).all()) # other layers in optimized model change self.assertFalse((second_layer_before == second_layer_after).all())
trl/tests/test_modeling_value_head.py/0
{ "file_path": "trl/tests/test_modeling_value_head.py", "repo_id": "trl", "token_count": 10199 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. __version__ = "0.15.0.dev0" from typing import TYPE_CHECKING from .import_utils import OptionalDependencyNotAvailable, _LazyModule, is_diffusers_available _import_structure = { "scripts": ["init_zero_verbose", "ScriptArguments", "TrlParser"], "data_utils": [ "apply_chat_template", "extract_prompt", "is_conversational", "maybe_apply_chat_template", "maybe_extract_prompt", "maybe_unpair_preference_dataset", "pack_examples", "unpair_preference_dataset", ], "environment": ["TextEnvironment", "TextHistory"], "extras": ["BestOfNSampler"], "import_utils": [ "is_deepspeed_available", "is_diffusers_available", "is_llm_blender_available", "is_mergekit_available", "is_rich_available", "is_unsloth_available", "is_vllm_available", ], "models": [ "SUPPORTED_ARCHITECTURES", "AutoModelForCausalLMWithValueHead", "AutoModelForSeq2SeqLMWithValueHead", "PreTrainedModelWrapper", "create_reference_model", "setup_chat_format", ], "trainer": [ "AlignPropConfig", "AlignPropTrainer", "AllTrueJudge", "BaseBinaryJudge", "BaseJudge", "BasePairwiseJudge", "BaseRankJudge", "BCOConfig", "BCOTrainer", "CPOConfig", "CPOTrainer", "DataCollatorForCompletionOnlyLM", "DPOConfig", "DPOTrainer", "FDivergenceConstants", "FDivergenceType", "GKDConfig", "GKDTrainer", "GRPOConfig", "GRPOTrainer", "HfPairwiseJudge", "IterativeSFTTrainer", "KTOConfig", "KTOTrainer", "LogCompletionsCallback", "MergeModelCallback", "ModelConfig", "NashMDConfig", "NashMDTrainer", "OnlineDPOConfig", "OnlineDPOTrainer", "OpenAIPairwiseJudge", "ORPOConfig", "ORPOTrainer", "PairRMJudge", "PPOConfig", "PPOTrainer", "PRMConfig", "PRMTrainer", "RewardConfig", "RewardTrainer", "RLOOConfig", "RLOOTrainer", "SFTConfig", "SFTTrainer", "WinRateCallback", "XPOConfig", "XPOTrainer", ], "trainer.callbacks": ["MergeModelCallback", "RichProgressCallback", "SyncRefModelCallback"], "trainer.utils": ["get_kbit_device_map", "get_peft_config", "get_quantization_config", "compute_token_accuracy"], } try: if not is_diffusers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["models"].extend( [ "DDPOPipelineOutput", "DDPOSchedulerOutput", "DDPOStableDiffusionPipeline", "DefaultDDPOStableDiffusionPipeline", ] ) _import_structure["trainer"].extend(["DDPOConfig", "DDPOTrainer"]) if TYPE_CHECKING: from .data_utils import ( apply_chat_template, extract_prompt, is_conversational, maybe_apply_chat_template, maybe_extract_prompt, maybe_unpair_preference_dataset, pack_examples, unpair_preference_dataset, ) from .environment import TextEnvironment, TextHistory from .extras import BestOfNSampler from .import_utils import ( is_deepspeed_available, is_diffusers_available, is_llm_blender_available, is_mergekit_available, is_rich_available, is_unsloth_available, is_vllm_available, ) from .models import ( SUPPORTED_ARCHITECTURES, AutoModelForCausalLMWithValueHead, AutoModelForSeq2SeqLMWithValueHead, PreTrainedModelWrapper, 
create_reference_model, setup_chat_format, ) from .scripts import ScriptArguments, TrlParser, init_zero_verbose from .trainer import ( AlignPropConfig, AlignPropTrainer, AllTrueJudge, BaseBinaryJudge, BaseJudge, BasePairwiseJudge, BaseRankJudge, BCOConfig, BCOTrainer, CPOConfig, CPOTrainer, DataCollatorForCompletionOnlyLM, DPOConfig, DPOTrainer, FDivergenceConstants, FDivergenceType, GKDConfig, GKDTrainer, GRPOConfig, GRPOTrainer, HfPairwiseJudge, IterativeSFTTrainer, KTOConfig, KTOTrainer, LogCompletionsCallback, MergeModelCallback, ModelConfig, NashMDConfig, NashMDTrainer, OnlineDPOConfig, OnlineDPOTrainer, OpenAIPairwiseJudge, ORPOConfig, ORPOTrainer, PairRMJudge, PPOConfig, PPOTrainer, PRMConfig, PRMTrainer, RewardConfig, RewardTrainer, RLOOConfig, RLOOTrainer, SFTConfig, SFTTrainer, WinRateCallback, XPOConfig, XPOTrainer, ) from .trainer.callbacks import RichProgressCallback, SyncRefModelCallback from .trainer.utils import compute_token_accuracy, get_kbit_device_map, get_peft_config, get_quantization_config try: if not is_diffusers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .models import ( DDPOPipelineOutput, DDPOSchedulerOutput, DDPOStableDiffusionPipeline, DefaultDDPOStableDiffusionPipeline, ) from .trainer import DDPOConfig, DDPOTrainer else: import sys sys.modules[__name__] = _LazyModule( __name__, globals()["__file__"], _import_structure, module_spec=__spec__, extra_objects={"__version__": __version__}, )
trl/trl/__init__.py/0
{ "file_path": "trl/trl/__init__.py", "repo_id": "trl", "token_count": 3079 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ State dict utilities: utility methods for converting state dicts easily File copied from diffusers to avoid import issues and make TRL compatible with most of diffusers versions. """ import enum class StateDictType(enum.Enum): """ The mode to use when converting state dicts. """ DIFFUSERS_OLD = "diffusers_old" PEFT = "peft" PEFT_TO_DIFFUSERS = { ".q_proj.lora_B": ".q_proj.lora_linear_layer.up", ".q_proj.lora_A": ".q_proj.lora_linear_layer.down", ".k_proj.lora_B": ".k_proj.lora_linear_layer.up", ".k_proj.lora_A": ".k_proj.lora_linear_layer.down", ".v_proj.lora_B": ".v_proj.lora_linear_layer.up", ".v_proj.lora_A": ".v_proj.lora_linear_layer.down", ".out_proj.lora_B": ".out_proj.lora_linear_layer.up", ".out_proj.lora_A": ".out_proj.lora_linear_layer.down", "to_k.lora_A": "to_k.lora.down", "to_k.lora_B": "to_k.lora.up", "to_q.lora_A": "to_q.lora.down", "to_q.lora_B": "to_q.lora.up", "to_v.lora_A": "to_v.lora.down", "to_v.lora_B": "to_v.lora.up", "to_out.0.lora_A": "to_out.0.lora.down", "to_out.0.lora_B": "to_out.0.lora.up", } DIFFUSERS_OLD_TO_DIFFUSERS = { ".to_q_lora.up": ".q_proj.lora_linear_layer.up", ".to_q_lora.down": ".q_proj.lora_linear_layer.down", ".to_k_lora.up": ".k_proj.lora_linear_layer.up", ".to_k_lora.down": ".k_proj.lora_linear_layer.down", ".to_v_lora.up": ".v_proj.lora_linear_layer.up", ".to_v_lora.down": ".v_proj.lora_linear_layer.down", ".to_out_lora.up": ".out_proj.lora_linear_layer.up", ".to_out_lora.down": ".out_proj.lora_linear_layer.down", } DIFFUSERS_STATE_DICT_MAPPINGS = { StateDictType.DIFFUSERS_OLD: DIFFUSERS_OLD_TO_DIFFUSERS, StateDictType.PEFT: PEFT_TO_DIFFUSERS, } KEYS_TO_ALWAYS_REPLACE = { ".processor.": ".", } def convert_state_dict(state_dict, mapping): r""" Simply iterates over the state dict and replaces the patterns in `mapping` with the corresponding values. Args: state_dict (`dict[str, torch.Tensor]`): The state dict to convert. mapping (`dict[str, str]`): The mapping to use for conversion, the mapping should be a dictionary with the following structure: - key: the pattern to replace - value: the pattern to replace with Returns: converted_state_dict (`dict`) The converted state dict. """ converted_state_dict = {} for k, v in state_dict.items(): # First, filter out the keys that we always want to replace for pattern in KEYS_TO_ALWAYS_REPLACE.keys(): if pattern in k: new_pattern = KEYS_TO_ALWAYS_REPLACE[pattern] k = k.replace(pattern, new_pattern) for pattern in mapping.keys(): if pattern in k: new_pattern = mapping[pattern] k = k.replace(pattern, new_pattern) break converted_state_dict[k] = v return converted_state_dict def convert_state_dict_to_diffusers(state_dict, original_type=None, **kwargs): r""" Converts a state dict to new diffusers format. The state dict can be from previous diffusers format (`OLD_DIFFUSERS`), or PEFT format (`PEFT`) or new diffusers format (`DIFFUSERS`). In the last case the method will return the state dict as is. 
    The method currently only supports converting from the old diffusers format or the PEFT format to the new
    diffusers format.

    Args:
        state_dict (`dict[str, torch.Tensor]`):
            The state dict to convert.
        original_type (`StateDictType`, *optional*):
            The original type of the state dict, if not provided, the method will try to infer it automatically.
        kwargs (`dict`, *optional*):
            Additional arguments to pass to the method.

            - **adapter_name**: For example, in case of PEFT, some keys will be pre-pended with the adapter name,
              and therefore need special handling. By default PEFT also takes care of that in the
              `get_peft_model_state_dict` method:
              https://github.com/huggingface/peft/blob/ba0477f2985b1ba311b83459d29895c809404e99/src/peft/utils/save_and_load.py#L92
              but we add it here in case we don't want to rely on that method.
    """
    peft_adapter_name = kwargs.pop("adapter_name", None)
    if peft_adapter_name is not None:
        peft_adapter_name = "." + peft_adapter_name
    else:
        peft_adapter_name = ""

    if original_type is None:
        # Old diffusers to PEFT
        if any("to_out_lora" in k for k in state_dict.keys()):
            original_type = StateDictType.DIFFUSERS_OLD
        elif any(f".lora_A{peft_adapter_name}.weight" in k for k in state_dict.keys()):
            original_type = StateDictType.PEFT
        elif any("lora_linear_layer" in k for k in state_dict.keys()):
            # nothing to do
            return state_dict
        else:
            raise ValueError("Could not automatically infer state dict type")

    if original_type not in DIFFUSERS_STATE_DICT_MAPPINGS.keys():
        raise ValueError(f"Original type {original_type} is not supported")

    mapping = DIFFUSERS_STATE_DICT_MAPPINGS[original_type]
    return convert_state_dict(state_dict, mapping)
trl/trl/models/sd_utils.py/0
{ "file_path": "trl/trl/models/sd_utils.py", "repo_id": "trl", "token_count": 2508 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from typing import List, Optional, Union import pandas as pd import torch from accelerate import Accelerator from accelerate.state import AcceleratorState from accelerate.utils import gather_object, is_comet_ml_available, is_deepspeed_available, is_wandb_available from rich.console import Console, Group from rich.live import Live from rich.panel import Panel from rich.progress import Progress from transformers import ( GenerationConfig, PreTrainedModel, PreTrainedTokenizerBase, Trainer, TrainerCallback, TrainerControl, TrainerState, TrainingArguments, ) from transformers.trainer_utils import has_length from ..data_utils import maybe_apply_chat_template from ..import_utils import is_mergekit_available from ..mergekit_utils import MergeConfig, merge_models, upload_model_to_hf from ..models.utils import unwrap_model_for_generation from .judges import BasePairwiseJudge from .utils import log_table_to_comet_experiment if is_deepspeed_available(): import deepspeed if is_comet_ml_available(): pass if is_wandb_available(): import wandb def _generate_completions( prompts: list[str], model: PreTrainedModel, tokenizer: PreTrainedTokenizerBase, accelerator: Accelerator, generation_config: Optional[GenerationConfig], batch_size: int = 1, ) -> list[str]: """ Generates completions for a list of pre-formatted prompts from the given model. Args: prompts (list[str]): A list of input prompts for which completions are to be generated. model (PreTrainedModel): The pre-trained model to be used for generation. tokenizer (PreTrainedTokenizerBase): The tokenizer to be used for encoding and decoding. accelerator (Accelerator): The accelerator to be used for model execution. generation_config (GenerationConfig): Configuration for text generation. batch_size (int, optional): The number of prompts to process in each batch. Default is 1. Returns: list[str]: A list of generated text completions corresponding to the input prompts. """ completions = [] with unwrap_model_for_generation(model, accelerator) as unwrapped_model: for idx in range(0, len(prompts), batch_size): batch = prompts[idx : idx + batch_size] tokenized_batch = tokenizer(batch, return_tensors="pt", padding=True, truncation=True).to(model.device) generations = unwrapped_model.generate( **tokenized_batch, generation_config=generation_config, ) for prompt, generation in zip(tokenized_batch.input_ids, generations): # Remove prompt from generation generation = generation[len(prompt) :] completion = tokenizer.decode(generation, skip_special_tokens=True) completions.append(completion) return completions class SyncRefModelCallback(TrainerCallback): """ Callback to synchronize the model with a reference model. 
""" def __init__( self, ref_model: Union[PreTrainedModel, torch.nn.Module], accelerator: Optional[Accelerator], ): self.accelerator = accelerator self.ref_model = ref_model @staticmethod def _sync_target_model(model, target_model, alpha): for target_param, copy_param in zip(target_model.parameters(), model.parameters()): target_param.data.mul_(1.0 - alpha).add_(copy_param.data, alpha=alpha) @staticmethod def sync_target_model(model, target_model, alpha): deepspeed_plugin = AcceleratorState().deepspeed_plugin if deepspeed_plugin is not None and deepspeed_plugin.zero_stage == 3: with deepspeed.zero.GatheredParameters( list(model.parameters()) + list(target_model.parameters()), modifier_rank=0 ): if deepspeed.comm.get_rank() == 0: SyncRefModelCallback._sync_target_model(model, target_model, alpha) else: SyncRefModelCallback._sync_target_model(model, target_model, alpha) def on_step_end(self, args, state, control, **kwargs): model: PreTrainedModel = kwargs["model"] if self.ref_model is not None and state.global_step % args.ref_model_sync_steps == 0: if self.accelerator: model = self.accelerator.unwrap_model(model) self.sync_target_model(model, self.ref_model, args.ref_model_mixup_alpha) class RichProgressCallback(TrainerCallback): """ A [`TrainerCallback`] that displays the progress of training or evaluation using Rich. """ def __init__(self): self.training_bar = None self.prediction_bar = None self.training_task_id = None self.prediction_task_id = None self.rich_group = None self.rich_console = None self.training_status = None self.current_step = None def on_train_begin(self, args, state, control, **kwargs): if state.is_world_process_zero: self.training_bar = Progress() self.prediction_bar = Progress() self.rich_console = Console() self.training_status = self.rich_console.status("Nothing to log yet ...") self.rich_group = Live(Panel(Group(self.training_bar, self.prediction_bar, self.training_status))) self.rich_group.start() self.training_task_id = self.training_bar.add_task("[blue]Training the model", total=state.max_steps) self.current_step = 0 def on_step_end(self, args, state, control, **kwargs): if state.is_world_process_zero: self.training_bar.update(self.training_task_id, advance=state.global_step - self.current_step, update=True) self.current_step = state.global_step def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs): if state.is_world_process_zero and has_length(eval_dataloader): if self.prediction_task_id is None: self.prediction_task_id = self.prediction_bar.add_task( "[blue]Predicting on the evaluation dataset", total=len(eval_dataloader) ) self.prediction_bar.update(self.prediction_task_id, advance=1, update=True) def on_evaluate(self, args, state, control, **kwargs): if state.is_world_process_zero: if self.prediction_task_id is not None: self.prediction_bar.remove_task(self.prediction_task_id) self.prediction_task_id = None def on_predict(self, args, state, control, **kwargs): if state.is_world_process_zero: if self.prediction_task_id is not None: self.prediction_bar.remove_task(self.prediction_task_id) self.prediction_task_id = None def on_log(self, args, state, control, logs=None, **kwargs): if state.is_world_process_zero and self.training_bar is not None: _ = logs.pop("total_flos", None) self.training_status.update(f"[bold green]Status = {str(logs)}") def on_train_end(self, args, state, control, **kwargs): if state.is_world_process_zero: self.rich_group.stop() self.training_bar = None self.prediction_bar = None self.training_task_id = None 
self.prediction_task_id = None self.rich_group = None self.rich_console = None self.training_status = None self.current_step = None def _win_rate_completions_df( state: TrainerState, prompts: List[str], completions: List[str], winner_indices: List[str] ) -> pd.DataFrame: global_step = [str(state.global_step)] * len(prompts) data = list(zip(global_step, prompts, completions, winner_indices)) # Split completions from reference model and policy split_data = [(item[0], item[1], item[2][0], item[2][1], item[3]) for item in data] return pd.DataFrame(split_data, columns=["step", "prompt", "reference_model", "policy", "winner_index"]) class WinRateCallback(TrainerCallback): """ A [`~transformers.TrainerCallback`] that computes the win rate of a model based on a reference. It generates completions using prompts from the evaluation dataset and compares the trained model's outputs against a reference. The reference is either the initial version of the model (before training) or the reference model, if available in the trainer. During each evaluation step, a judge determines how often the trained model's completions win against the reference using a judge. The win rate is then logged in the trainer's logs under the key `"eval_win_rate"`. Usage: ```python trainer = DPOTrainer(...) judge = PairRMJudge() win_rate_callback = WinRateCallback(judge=judge, trainer=trainer) trainer.add_callback(win_rate_callback) ``` Args: judge (`BasePairwiseJudge`): The judge to use for comparing completions. trainer (`Trainer`): Trainer to which the callback will be attached. The trainer's evaluation dataset must include a `"prompt"` column containing the prompts for generating completions. If the `Trainer` has a reference model (via the `ref_model` attribute), it will use this reference model for generating the reference completions; otherwise, it defaults to using the initial model. generation_config (`GenerationConfig`, *optional*): The generation config to use for generating completions. num_prompts (`int` or `None`, *optional*, defaults to `None`): The number of prompts to generate completions for. If not provided, defaults to the number of examples in the evaluation dataset. shuffle_order (`bool`, *optional*, defaults to `True`): Whether to shuffle the order of the completions before judging. use_soft_judge (`bool`, *optional*, defaults to `False`): Whether to use a soft judge that returns a win probability between 0 and 1 for the first completion vs the second. """ def __init__( self, judge: BasePairwiseJudge, trainer: Trainer, generation_config: Optional[GenerationConfig] = None, num_prompts: Optional[int] = None, shuffle_order: bool = True, use_soft_judge: bool = False, ): self.judge = judge self.trainer = trainer self.shuffle_order = shuffle_order self.generation_config = generation_config self.ref_completions = [] self.use_soft_judge = use_soft_judge if self.trainer.eval_dataset is None: raise ValueError("Trainer must have an evaluation dataset to use the WinRateCallback.") else: self.eval_dataset = self.trainer.eval_dataset if num_prompts is not None: self.eval_dataset = self.eval_dataset.select(range(num_prompts)) def on_train_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): # When the trainer is initialized, we generate completions for the reference model. 
tokenizer = kwargs["processing_class"] tokenizer.padding_side = "left" accelerator = self.trainer.accelerator # Use the reference model if available, otherwise use the initial model model = getattr(self.trainer, "ref_model", None) # At this point, there are two cases where `ref_model` is None: # 1. The method doesn't require a reference model. # 2. The method uses a reference model, but `ref_model` is set to None. # This occurs when using PEFT, where the reference model can be obtained by simply disabling the model's adapter. # In theory, we should disable the adapter here, but since it's zero-initialized at the start of training, # the model behaves identically with or without the adapter. # Therefore, there's no need to explicitly disable it at this point. if model is None: model = self.trainer.model_wrapped with accelerator.split_between_processes(self.eval_dataset["prompt"]) as prompts: self.ref_completions = _generate_completions( prompts, model=model, tokenizer=tokenizer, accelerator=accelerator, generation_config=self.generation_config, batch_size=args.per_device_eval_batch_size, ) # Compute initial win rate as a reference point completions = list(zip(self.ref_completions, self.ref_completions)) if self.use_soft_judge: ref_win_probs = self.judge.judge(prompts, completions, self.shuffle_order, return_scores=True) winner_indices = [0 if score > 0.5 else 1 for score in ref_win_probs] ref_win_probs = gather_object(ref_win_probs) else: winner_indices = self.judge.judge(prompts, completions, self.shuffle_order) prompts = gather_object(prompts) completions = gather_object(completions) winner_indices = gather_object(winner_indices) # Logging if self.trainer.accelerator.is_main_process: win_rate = sum(winner_idx == 1 for winner_idx in winner_indices) / len(winner_indices) if self.use_soft_judge: avg_win_prob = 1.0 - sum(ref_win_probs) / len(ref_win_probs) self.trainer.log({"eval_avg_win_prob": avg_win_prob, "eval_win_rate": win_rate}) else: self.trainer.log({"eval_win_rate": win_rate}) if "wandb" in args.report_to: import wandb if wandb.run is not None: df = _win_rate_completions_df( state=state, prompts=prompts, completions=completions, winner_indices=winner_indices, ) wandb.log({"win_rate_completions": wandb.Table(dataframe=df)}) if "comet_ml" in args.report_to: df = _win_rate_completions_df( state=state, prompts=prompts, completions=completions, winner_indices=winner_indices, ) log_table_to_comet_experiment( name="win_rate_completions.csv", table=df, ) def on_evaluate(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs): # At every evaluation step, we generate completions for the model and compare them with the reference # completions that have been generated at the beginning of training. We then compute the win rate and log it to # the trainer. 
tokenizer = kwargs["processing_class"] tokenizer.padding_side = "left" accelerator = self.trainer.accelerator model = self.trainer.model_wrapped with accelerator.split_between_processes(self.eval_dataset["prompt"]) as prompts: completions = _generate_completions( prompts, model=model, tokenizer=tokenizer, accelerator=accelerator, generation_config=self.generation_config, batch_size=args.per_device_eval_batch_size, ) completions = list(zip(self.ref_completions, completions)) if self.use_soft_judge: ref_win_probs = self.judge.judge(prompts, completions, self.shuffle_order, return_scores=True) winner_indices = [0 if score > 0.5 else 1 for score in ref_win_probs] ref_win_probs = gather_object(ref_win_probs) else: winner_indices = self.judge.judge(prompts, completions, self.shuffle_order) prompts = gather_object(prompts) completions = gather_object(completions) winner_indices = gather_object(winner_indices) # Logging if self.trainer.accelerator.is_main_process: win_rate = sum(winner_idx == 1 for winner_idx in winner_indices) / len(winner_indices) if self.use_soft_judge: avg_win_prob = 1.0 - sum(ref_win_probs) / len(ref_win_probs) self.trainer.log({"eval_avg_win_prob": avg_win_prob, "eval_win_rate": win_rate}) else: self.trainer.log({"eval_win_rate": win_rate}) if "wandb" in args.report_to: import wandb if wandb.run is not None: df = _win_rate_completions_df( state=state, prompts=prompts, completions=completions, winner_indices=winner_indices, ) wandb.log({"win_rate_completions": wandb.Table(dataframe=df)}) if "comet_ml" in args.report_to: df = _win_rate_completions_df( state=state, prompts=prompts, completions=completions, winner_indices=winner_indices, ) log_table_to_comet_experiment( name="win_rate_completions.csv", table=df, ) class LogCompletionsCallback(TrainerCallback): r""" A [`~transformers.TrainerCallback`] that logs completions to Weights & Biases and/or Comet. Usage: ```python trainer = DPOTrainer(...) completions_callback = LogCompletionsCallback(trainer=trainer) trainer.add_callback(completions_callback) ``` Args: trainer (`Trainer`): Trainer to which the callback will be attached. The trainer's evaluation dataset must include a `"prompt"` column containing the prompts for generating completions. generation_config (`GenerationConfig`, *optional*): The generation config to use for generating completions. num_prompts (`int` or `None`, *optional*): The number of prompts to generate completions for. If not provided, defaults to the number of examples in the evaluation dataset. freq (`int` or `None`, *optional*): The frequency at which to log completions. If not provided, defaults to the trainer's `eval_steps`. 
""" def __init__( self, trainer: Trainer, generation_config: Optional[GenerationConfig] = None, num_prompts: Optional[int] = None, freq: Optional[int] = None, ): self.trainer = trainer self.generation_config = generation_config self.freq = freq self.table = [] self._last_logged_step = -1 if self.trainer.eval_dataset is None: raise ValueError("Trainer must have an evaluation dataset to use the LogCompletionsCallback.") else: self.eval_dataset = self.trainer.eval_dataset if num_prompts is not None: self.eval_dataset = self.eval_dataset.select(range(num_prompts)) def on_step_end(self, args, state, control, **kwargs): # Only log once per step (this method may be called multiple times) if state.global_step == self._last_logged_step: return # Only log every `freq` steps (if no `freq` is provided, log every `eval_steps` steps) freq = self.freq or state.eval_steps if state.global_step % freq != 0: return tokenizer = kwargs["processing_class"] tokenizer.padding_side = "left" accelerator = self.trainer.accelerator model = self.trainer.model_wrapped with accelerator.split_between_processes(self.eval_dataset["prompt"]) as prompts: prompts = [maybe_apply_chat_template({"prompt": prompt}, tokenizer)["prompt"] for prompt in prompts] completions = _generate_completions( prompts, model=model, tokenizer=tokenizer, accelerator=accelerator, generation_config=self.generation_config, batch_size=args.per_device_eval_batch_size, ) completions = gather_object(completions) prompts = gather_object(prompts) # Build the data to log if self.trainer.accelerator.is_main_process: global_step = [str(state.global_step)] * len(prompts) data = list(zip(global_step, prompts, completions)) self.table.extend(data) table = pd.DataFrame(columns=["step", "prompt", "completion"], data=self.table) if "wandb" in args.report_to: wandb.log({"completions": table}) if "comet_ml" in args.report_to: log_table_to_comet_experiment( name="completions.csv", table=table, ) # Save the last logged step, so we don't log the same completions multiple times self._last_logged_step = state.global_step class MergeModelCallback(TrainerCallback): r""" A [`~transformers.TrainerCallback`] that merges the policy model (the model being trained) with another model based on a merge configuration. Args: merge_config ([`MergeConfig`], *optional*, defaults to `None`): Configuration used for the merging process. If not provided, the default [`MergeConfig`] is used. merge_at_every_checkpoint (`bool`, *optional*, defaults to `False`): Whether to merge the model at every checkpoint. push_to_hub (`bool`, *optional*, defaults to `False`): Whether to push the merged model to the Hub after merging. Example: ```python !pip install trl[mergekit] from trl.mergekit_utils import MergeConfig from trl import MergeModelCallback config = MergeConfig() merge_callback = MergeModelCallback(config) trainer = DPOTrainer(..., callbacks=[merge_callback]) ``` """ def __init__( self, merge_config: Optional["MergeConfig"] = None, merge_at_every_checkpoint: bool = False, push_to_hub: bool = False, ): if not is_mergekit_available(): raise ImportError( "MergeModelCallback requires the `mergekit` extra. To install, run `pip install trl[mergekit]`." 
) self.merge_config = merge_config or MergeConfig() self.merge_at_every_checkpoint = merge_at_every_checkpoint self.push_to_hub = push_to_hub def _merge_and_maybe_push(self, output_dir, global_step, model): checkpoint_path = os.path.join(output_dir, f"checkpoint-{global_step}") self.merge_config.policy_model_path = checkpoint_path if self.merge_config.target_model_path is None: self.merge_config.target_model_path = model.config._name_or_path merge_path = os.path.join(checkpoint_path, "merged") merge_models(self.merge_config.create(), merge_path) if self.push_to_hub: repo_name = f"{output_dir}_checkpoint-{global_step}_merged" upload_model_to_hf(merge_path, repo_name) def on_save(self, args, state, control, model=None, **kwargs): if self.merge_at_every_checkpoint: self._merge_and_maybe_push(args.output_dir, state.global_step, model) def on_train_end(self, args, state, control, model=None, **kwargs): if not self.merge_at_every_checkpoint: self._merge_and_maybe_push(args.output_dir, state.global_step, model)
trl/trl/trainer/callbacks.py/0
{ "file_path": "trl/trl/trainer/callbacks.py", "repo_id": "trl", "token_count": 10326 }
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from dataclasses import dataclass, field

from trl.trainer.online_dpo_config import OnlineDPOConfig


@dataclass
class NashMDConfig(OnlineDPOConfig):
    r"""
    Configuration class for the [`NashMDTrainer`].

    Subclass of [`OnlineDPOConfig`]; we can use all its arguments and add the following:

    Parameters:
        mixture_coef (`float` or `list[float]`, *optional*, defaults to `0.5`):
            Logit mixture coefficient for the model and reference model. If a list of floats is provided then the
            mixture coefficient is selected for each new epoch and the last coefficient is used for the rest of the
            epochs.
    """

    mixture_coef: list[float] = field(
        default_factory=lambda: [0.5],
        metadata={
            "help": "Logit mixture coefficient for the model and reference model. If a list of floats is provided "
            "then the mixture coefficient is selected for each new epoch and the last coefficient is used for the "
            "rest of the epochs."
        },
    )

    def __post_init__(self):
        super().__post_init__()
        if hasattr(self.mixture_coef, "__len__") and len(self.mixture_coef) == 1:
            self.mixture_coef = self.mixture_coef[0]
trl/trl/trainer/nash_md_config.py/0
{ "file_path": "trl/trl/trainer/nash_md_config.py", "repo_id": "trl", "token_count": 621 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import dataclasses import importlib.resources as pkg_resources import json import random import warnings from collections import deque from dataclasses import dataclass, field from importlib.metadata import version from typing import Any, Literal, Optional, Union import datasets import numpy as np import pandas as pd import torch import torch.nn.functional as F import torch.utils.data from accelerate import Accelerator, PartialState from accelerate.state import AcceleratorState from huggingface_hub import ModelCard, ModelCardData from rich.console import Console from rich.table import Table from torch.nn.utils.rnn import pad_sequence from torch.utils.data import IterableDataset from transformers import ( BitsAndBytesConfig, DataCollatorForLanguageModeling, EvalPrediction, GenerationConfig, PreTrainedTokenizerBase, TrainerState, TrainingArguments, is_comet_available, ) from transformers.utils import ( is_peft_available, is_torch_mlu_available, is_torch_npu_available, is_torch_xpu_available, ) from ..trainer.model_config import ModelConfig if is_comet_available(): import comet_ml if is_peft_available(): from peft import LoraConfig, PeftConfig class DataCollatorForCompletionOnlyLM(DataCollatorForLanguageModeling): """ Data collator used for completion tasks. It ensures that all the tokens of the labels are set to an 'ignore_index' when they do not come from the assistant. This ensure that the loss is only calculated on the completion made by the assistant. Args: response_template (`Union[str, list[int]]`): the template form that indicates the start of the response, typically something like '### Response:\n'. It can also be passed as tokenized ids, which can be useful when using a tokenizer that encodes the response differently if it does not have proper context. instruction_template (`Union[str, list[int]]`): the template form that indicates the start of the human instruction, typically something like '### Human:\n'. Useful for assistant-style conversation datasets. It can also be passed as tokenized ids. mlm (`bool`, *optional*, defaults to `False`): Whether to use masked language modeling in the underlying `DataCollatorForLanguageModeling` class. Note that this option currently has no effect but is present for flexibility and backwards-compatibility. 
ignore_index (`int`, *optional*, defaults to `-100`): The index to use to ignore the initial tokens with """ def __init__( self, response_template: Union[str, list[int]], instruction_template: Optional[Union[str, list[int]]] = None, *args, mlm: bool = False, ignore_index: int = -100, padding_free: bool = False, **kwargs, ): super().__init__(*args, mlm=mlm, **kwargs) self.instruction_template = instruction_template if isinstance(instruction_template, str): # The user provides a string, must tokenize self.instruction_token_ids = self.tokenizer.encode(self.instruction_template, add_special_tokens=False) else: # The user already provides the token ids self.instruction_token_ids = instruction_template self.response_template = response_template if isinstance(response_template, str): # The user provides a string, must tokenize self.response_token_ids = self.tokenizer.encode(self.response_template, add_special_tokens=False) else: # The user already provides the token ids self.response_token_ids = response_template if not self.mlm and self.instruction_template and self.tokenizer.pad_token_id == self.tokenizer.eos_token_id: warnings.warn( "The pad_token_id and eos_token_id values of this tokenizer are identical. " "If you are planning for multi-turn training, " "it can result in the model continuously generating questions and answers without eos token. " "To avoid this, set the pad_token_id to a different value.", UserWarning, ) self.ignore_index = ignore_index self.padding_free = padding_free def torch_call(self, examples: list[Union[list[int], Any, dict[str, Any]]]) -> dict[str, Any]: batch = super().torch_call(examples) if self.instruction_template is None: for i in range(len(examples)): response_token_ids_start_idx = None for idx in np.where(batch["labels"][i] == self.response_token_ids[0])[0]: # `response_token_ids` is `'### Response:\n'`, here we are just making sure that the token IDs match if ( self.response_token_ids == batch["labels"][i][idx : idx + len(self.response_token_ids)].tolist() ): response_token_ids_start_idx = idx if response_token_ids_start_idx is None: warnings.warn( f"Could not find response key `{self.response_template}` in the following instance: " f"{self.tokenizer.decode(batch['input_ids'][i])}. This instance will be ignored in loss " "calculation. Note, if this happens often, consider increasing the `max_seq_length`.", UserWarning, ) batch["labels"][i, :] = self.ignore_index else: response_token_ids_end_idx = response_token_ids_start_idx + len(self.response_token_ids) # Make pytorch loss function ignore all tokens up through the end of the response key batch["labels"][i, :response_token_ids_end_idx] = self.ignore_index else: for i in range(len(examples)): response_token_ids_idxs = [] human_token_ids_idxs = [] for assistant_idx in np.where(batch["labels"][i] == self.response_token_ids[0])[0]: # find the indexes of the start of a response. if ( self.response_token_ids == batch["labels"][i][assistant_idx : assistant_idx + len(self.response_token_ids)].tolist() ): response_token_ids_idxs.append(assistant_idx + len(self.response_token_ids)) if len(response_token_ids_idxs) == 0: warnings.warn( f"Could not find response key `{self.response_template}` in the following instance: " f"{self.tokenizer.decode(batch['input_ids'][i])}. This instance will be ignored in loss " "calculation. 
Note, if this happens often, consider increasing the `max_seq_length`.", UserWarning, ) batch["labels"][i, :] = self.ignore_index human_token_ids = self.instruction_token_ids for human_idx in np.where(batch["labels"][i] == human_token_ids[0])[0]: # find the indexes of the start of a human answer. if human_token_ids == batch["labels"][i][human_idx : human_idx + len(human_token_ids)].tolist(): human_token_ids_idxs.append(human_idx) if len(human_token_ids_idxs) == 0: warnings.warn( f"Could not find instruction key `{self.instruction_template}` in the following instance: " f"{self.tokenizer.decode(batch['input_ids'][i])}. This instance will be ignored in loss " "calculation. Note, if this happens often, consider increasing the `max_seq_length`.", UserWarning, ) batch["labels"][i, :] = self.ignore_index if ( len(human_token_ids_idxs) > 0 and len(response_token_ids_idxs) > 0 and human_token_ids_idxs[0] > response_token_ids_idxs[0] ): human_token_ids_idxs = [0] + human_token_ids_idxs for idx, (start, end) in enumerate(zip(human_token_ids_idxs, response_token_ids_idxs)): # Make pytorch loss function ignore all non response tokens if idx != 0: batch["labels"][i, start:end] = self.ignore_index else: batch["labels"][i, :end] = self.ignore_index if len(response_token_ids_idxs) < len(human_token_ids_idxs): batch["labels"][i, human_token_ids_idxs[-1] :] = self.ignore_index if self.padding_free: # remove padding, `attention_mask` and add `position_ids` attn_mask = batch.pop("attention_mask") batch["input_ids"] = batch["input_ids"][attn_mask.bool()].unsqueeze(0) batch["position_ids"] = attn_mask.cumsum(1)[attn_mask.bool()].unsqueeze(0) - 1 batch["labels"] = batch["labels"][attn_mask.bool()].unsqueeze(0) batch["labels"][batch["position_ids"] == 0] = self.ignore_index # Calculate cumulative sequence lengths for queries and keys to prevent graph breaks during further computations. flattened_position_ids = batch["position_ids"].flatten() indices_q = torch.arange( flattened_position_ids.size(0), device=flattened_position_ids.device, dtype=torch.int32 ) batch["cu_seq_lens_q"] = torch.cat( ( indices_q[flattened_position_ids == 0], torch.tensor( flattened_position_ids.size(), device=flattened_position_ids.device, dtype=torch.int32 ), ) ) batch["cu_seq_lens_k"] = batch["cu_seq_lens_q"] # Determine maximum sequence lengths to prevent graph breaks during further computations. batch["max_length_k"] = flattened_position_ids.max().item() + 1 batch["max_length_q"] = batch["max_length_k"] return batch @dataclass class DataCollatorForChatML: """ Data collator for ChatML format datasets. """ tokenizer: PreTrainedTokenizerBase ignore_index: int = -100 max_length: int = None prompt_key: str = "prompt" messages_key: str = "messages" def __post_init__(self): if self.tokenizer.pad_token_id is None: raise ValueError("The tokenizer does not have a pad token. 
Please set `pad_token_id` in the tokenizer.") if self.max_length is None: # set a sensible default self.max_length = min(self.tokenizer.model_max_length, 1024) def __call__(self, examples: list[dict[str, Any]]) -> dict[str, torch.Tensor]: input_ids = [] attention_mask = [] prompts_input_ids = [] prompt_attention_mask = [] labels = [] for example in examples: formatted_prompt = example.get(self.prompt_key, None) if formatted_prompt is None: prompt = example[self.messages_key][:-1] formatted_prompt = self.tokenizer.apply_chat_template( prompt, tokenize=False, add_generation_prompt=True ) if "input_ids" not in example: message = example[self.messages_key] formatted_message = self.tokenizer.apply_chat_template( message, tokenize=False, add_generation_prompt=False ) tokenized_message = self.tokenizer( formatted_message, truncation=True, max_length=self.max_length, padding=False, return_tensors=None, add_special_tokens=False, ) input_ids.append(tokenized_message["input_ids"]) attention_mask.append(tokenized_message["attention_mask"]) else: input_ids.append(example["input_ids"]) attention_mask.append(example["attention_mask"]) tokenized_prompt = self.tokenizer( formatted_prompt, truncation=True, max_length=len(input_ids[-1]), padding=False, return_tensors=None, add_special_tokens=False, ) prompts_input_ids.append(tokenized_prompt["input_ids"]) prompt_attention_mask.append(tokenized_prompt["attention_mask"]) # Create the labels that will have all but the completion tokens of the example["input_ids"] set to ignore_index label = [self.ignore_index] * len(input_ids[-1]) completion_start_idx = len(tokenized_prompt["input_ids"]) label[completion_start_idx:] = input_ids[-1][completion_start_idx:] labels.append(label) # convert to list of tensors and pad input_ids = [torch.tensor(ids, dtype=torch.long) for ids in input_ids] attention_mask = [torch.tensor(mask, dtype=torch.long) for mask in attention_mask] labels = [torch.tensor(label, dtype=torch.long) for label in labels] input_ids = pad(input_ids, padding_side="left", padding_value=self.tokenizer.pad_token_id) attention_mask = pad(attention_mask, padding_side="left", padding_value=0) labels = pad(labels, padding_side="left", padding_value=self.ignore_index) prompts_input_ids = [torch.tensor(ids, dtype=torch.long) for ids in prompts_input_ids] prompt_attention_mask = [torch.tensor(mask, dtype=torch.long) for mask in prompt_attention_mask] prompts_input_ids = pad(prompts_input_ids, padding_side="left", padding_value=self.tokenizer.pad_token_id) prompt_attention_mask = pad(prompt_attention_mask, padding_side="left", padding_value=0) return { "input_ids": input_ids, "attention_mask": attention_mask, "labels": labels, "prompts": prompts_input_ids, "prompt_attention_mask": prompt_attention_mask, } @dataclass class RewardDataCollatorWithPadding: r""" Reward DataCollator class that pads the inputs to the maximum length of the batch. Args: tokenizer (`PreTrainedTokenizerBase`): The tokenizer used for encoding the data. padding (`Union[bool, str, `PaddingStrategy`]`, `optional`, defaults to `True`): padding_strategy to pass to the tokenizer. pad_to_multiple_of (`int` or `None`, `optional`, defaults to `None`): If set will pad the sequence to a multiple of the provided value. return_tensors (`str`, `optional`, defaults to `"pt"`): The tensor type to use. 
""" tokenizer: PreTrainedTokenizerBase padding: Union[bool, str] = True pad_to_multiple_of: Optional[int] = None return_tensors: str = "pt" def __call__(self, features: list[dict[str, Any]]) -> dict[str, Any]: features_chosen = [] features_rejected = [] margin = [] # check if we have a margin. If we do, we need to batch it as well has_margin = "margin" in features[0] for feature in features: # check if the keys are named as expected if ( "input_ids_chosen" not in feature or "input_ids_rejected" not in feature or "attention_mask_chosen" not in feature or "attention_mask_rejected" not in feature ): raise ValueError( "The features should include `input_ids_chosen`, `attention_mask_chosen`, `input_ids_rejected` and `attention_mask_rejected`" ) features_chosen.append( { "input_ids": feature["input_ids_chosen"], "attention_mask": feature["attention_mask_chosen"], } ) features_rejected.append( { "input_ids": feature["input_ids_rejected"], "attention_mask": feature["attention_mask_rejected"], } ) if has_margin: margin.append(feature["margin"]) batch_chosen = self.tokenizer.pad( features_chosen, padding=self.padding, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors=self.return_tensors, ) batch_rejected = self.tokenizer.pad( features_rejected, padding=self.padding, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors=self.return_tensors, ) batch = { "input_ids_chosen": batch_chosen["input_ids"], "attention_mask_chosen": batch_chosen["attention_mask"], "input_ids_rejected": batch_rejected["input_ids"], "attention_mask_rejected": batch_rejected["attention_mask"], "return_loss": True, } if has_margin: margin = torch.tensor(margin, dtype=torch.float) batch["margin"] = margin return batch def pad(tensors: list[torch.Tensor], padding_value: int = 0, padding_side: str = "right") -> torch.Tensor: """ Pads a list of tensors to the same shape along the first dimension. Args: tensors (`list[torch.Tensor]`): List of input tensors to pad. padding_value (`int`): Value to use for padding. Default is 0. padding_side (`str`): Side on which to add padding. Must be 'left' or 'right'. Default is 'right'. Returns: `torch.Tensor`: A single tensor containing the padded tensors. Examples: >>> import torch >>> pad([torch.tensor([1, 2, 3]), torch.tensor([4, 5])]) tensor([[1, 2, 3], [4, 5, 0]]) >>> pad([torch.tensor([[1, 2], [3, 4]]), torch.tensor([[5, 6]])]) tensor([[[1, 2], [3, 4]], [[5, 6], [0, 0]]]) """ # Determine the maximum shape for each dimension output_shape = np.max([t.shape for t in tensors], 0).tolist() # Create an output tensor filled with the padding value output = torch.full((len(tensors), *output_shape), padding_value, dtype=tensors[0].dtype, device=tensors[0].device) for i, t in enumerate(tensors): # Determine the slice for the sequence dimension if padding_side == "left": seq_slice = slice(output_shape[0] - t.shape[0], output_shape[0]) elif padding_side == "right": seq_slice = slice(0, t.shape[0]) else: raise ValueError("padding_side must be 'left' or 'right'") slices = (seq_slice,) + tuple(slice(0, s) for s in t.shape[1:]) output[i][slices] = t return output @dataclass class DPODataCollatorWithPadding: r""" DPO DataCollator class that pads the tokenized inputs to the maximum length of the batch. Args: pad_token_id (`int` defaults to 0): The tokenizer's pad_token_id. label_pad_token_id (`int`, defaults to -100): The label used for masking. is_encoder_decoder (`bool` or `None`, `optional`, defaults to `None`): Whether you model has an encoder_decoder architecture. 
""" pad_token_id: int = 0 label_pad_token_id: int = -100 is_encoder_decoder: Optional[bool] = False def __call__(self, features: list[dict[str, Any]]) -> dict[str, Any]: # first, pad everything to the same length padded_batch = {} for k in features[0].keys(): if k.endswith(("_input_ids", "_attention_mask", "_labels", "_pixel_values")): if self.is_encoder_decoder: to_pad = [torch.LongTensor(ex[k]) for ex in features] if (k.startswith("prompt")) and (k.endswith("input_ids")): if self.pad_token_id is None: raise ValueError( "Padding is enabled, but the tokenizer is not configured with a padding token." " Explicitly set `tokenizer.pad_token` (e.g. `tokenizer.pad_token = tokenizer.eos_token`)" " before calling the trainer." ) padding_value = self.pad_token_id elif k.endswith("_attention_mask"): padding_value = 0 elif k.startswith(("chosen", "rejected", "completion")) or ("decoder" in k): padding_value = self.label_pad_token_id else: raise ValueError(f"Unexpected key in batch '{k}'") padded_batch[k] = pad_sequence(to_pad, batch_first=True, padding_value=padding_value) else: # Set padding value based on the key if k.endswith("_input_ids"): if self.pad_token_id is None: raise ValueError( "Padding is enabled, but the tokenizer is not configured with a padding token." " Explicitly set `tokenizer.pad_token` (e.g. `tokenizer.pad_token = tokenizer.eos_token`)" " before calling the trainer." ) padding_value = self.pad_token_id elif k.endswith("_labels"): padding_value = self.label_pad_token_id elif k.endswith("_attention_mask"): padding_value = 0 elif k.endswith("_pixel_values"): padding_value = 0 # TODO: check if this is correct else: raise ValueError(f"Unexpected key in batch '{k}'") # Set padding side based on the key if k in ["prompt_input_ids", "prompt_attention_mask"]: padding_side = "left" else: padding_side = "right" # Set the dtype if k.endswith("_pixel_values"): dtype = torch.float32 # will be downcasted if necessary by the Trainer else: dtype = torch.int64 # Convert to tensor and pad to_pad = [torch.tensor(ex[k], dtype=dtype) for ex in features] padded_batch[k] = pad(to_pad, padding_value=padding_value, padding_side=padding_side) elif k.endswith("_logps"): # the cached reference model logprobs padded_batch[k] = torch.tensor([ex[k] for ex in features]) else: padded_batch[k] = [ex[k] for ex in features] return padded_batch class ConstantLengthDataset(IterableDataset): """ Iterable dataset that returns constant length chunks of tokens from stream of text files. The dataset also formats the text before tokenization with a specific format that is provided by the user. Args: tokenizer (`transformers.PreTrainedTokenizer`): The processor used for processing the data. dataset (`dataset.Dataset`): Dataset with text files. dataset_text_field (`str` or `None`, *optional*, defaults to `None`): Name of the field in the dataset that contains the text. Only one of `dataset_text_field` and `formatting_func` should be provided. formatting_func (`Callable`, *optional*): Function that formats the text before tokenization. Usually it is recommended to follow a certain pattern such as `"### Question: {question} ### Answer: {answer}"`. Only one of `dataset_text_field` and `formatting_func` should be provided. infinite (`bool`, *optional*, defaults to `False`): If True the iterator is reset after dataset reaches end else stops. seq_length (`int`, *optional*, defaults to `1024`): Length of token sequences to return. num_of_sequences (`int`, *optional*, defaults to `1024`): Number of token sequences to keep in buffer. 
chars_per_token (`int`, *optional*, defaults to `3.6`): Number of characters per token used to estimate number of tokens in text buffer. eos_token_id (`int`, *optional*, defaults to `0`): Id of the end of sequence token if the passed tokenizer does not have an EOS token. shuffle (`bool`, *optional*, defaults to `True`) Shuffle the examples before they are returned append_concat_token (`bool`, *optional*, defaults to `True`) If true, appends `eos_token_id` at the end of each sample being packed. add_special_tokens (`bool`, *optional*, defaults to `True`) If true, tokenizers adds special tokens to each sample being packed. """ def __init__( self, tokenizer, dataset, dataset_text_field=None, formatting_func=None, infinite=False, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6, eos_token_id=0, shuffle=True, append_concat_token=True, add_special_tokens=True, ): self.tokenizer = tokenizer self.concat_token_id = tokenizer.eos_token_id if tokenizer.eos_token_id else eos_token_id self.dataset = dataset self.seq_length = seq_length self.infinite = infinite self.current_size = 0 self.max_buffer_size = seq_length * chars_per_token * num_of_sequences self.shuffle = shuffle self.append_concat_token = append_concat_token self.add_special_tokens = add_special_tokens if dataset_text_field is not None and formatting_func is not None: warnings.warn( "Only one of `dataset_text_field` and `formatting_func` should be provided. " "Ignoring `dataset_text_field` and using `formatting_func`.", UserWarning, ) if formatting_func is not None: self.formatting_func = formatting_func elif dataset_text_field is not None: self.formatting_func = lambda x: x[dataset_text_field] else: # neither is provided raise ValueError("Either `dataset_text_field` or `formatting_func` should be provided.") self.pretokenized = False column_names = ( dataset.column_names if isinstance(dataset, (datasets.Dataset, datasets.IterableDataset)) else None ) if column_names is not None and "input_ids" in column_names: self.pretokenized = True # since the dataset is tokenized, the unit of buffer size should be tokens self.max_buffer_size = seq_length * num_of_sequences def __len__(self): return len(self.dataset) def __iter__(self): iterator = iter(self.dataset) more_examples = True while more_examples: buffer, buffer_len = [], 0 while True: if buffer_len >= self.max_buffer_size: break try: buffer.append(self.formatting_func(next(iterator))) buffer_len += len(buffer[-1]) except StopIteration: if self.infinite: iterator = iter(self.dataset) else: more_examples = False break if self.shuffle: random.shuffle(buffer) if self.pretokenized: tokenized_inputs = buffer else: tokenized_inputs = self.tokenizer( buffer, add_special_tokens=self.add_special_tokens, truncation=False )["input_ids"] all_token_ids = [] for tokenized_input in tokenized_inputs: if self.append_concat_token: tokenized_input = tokenized_input + [self.concat_token_id] all_token_ids.extend(tokenized_input) examples = [] for i in range(0, len(all_token_ids), self.seq_length): input_ids = all_token_ids[i : i + self.seq_length] if len(input_ids) == self.seq_length: examples.append(input_ids) if self.shuffle: # Shuffle again, otherwise split examples occur in consecutive tensors. random.shuffle(examples) for example in examples: self.current_size += 1 yield { "input_ids": torch.LongTensor(example), "labels": torch.LongTensor(example), } @dataclass class RunningMoments: """ Calculates the running mean and standard deviation of a data stream. 
Reference: https://github.com/OpenLMLab/MOSS-RLHF/blob/40b91eb2f2b71b16919addede0341d2bef70825d/utils.py#L75 """ accelerator: Accelerator mean: float = 0 std: float = 1 var: float = 1 count: float = 1e-24 @torch.no_grad() def update(self, xs: torch.Tensor) -> tuple[float, float]: """ Updates running moments from batch's moments computed across ranks """ if self.accelerator.use_distributed: xs_mean, xs_var, xs_count = get_global_statistics(self.accelerator, xs) else: xs_count = xs.numel() xs_var, xs_mean = torch.var_mean(xs, unbiased=False) xs_mean, xs_var = xs_mean.float(), xs_var.float() delta = xs_mean - self.mean tot_count = self.count + xs_count new_sum = xs_var * xs_count # correct old_sum deviation accounting for the new mean old_sum = self.var * self.count + delta**2 * self.count * xs_count / tot_count tot_sum = old_sum + new_sum self.mean += (delta * xs_count / tot_count).item() new_var = tot_sum / tot_count self.std = (new_var * tot_count / (tot_count - 1)).float().sqrt().item() self.var = new_var.item() self.count = tot_count return xs_mean.item(), (xs_var * xs_count / (xs_count - 1)).float().sqrt().item() def save_to_json(self, json_path: str): """Save the content of this instance in JSON format inside `json_path`.""" # save everything except accelerator if self.accelerator.is_main_process: save_dict = dataclasses.asdict(self, dict_factory=lambda x: {k: v for (k, v) in x if k != "accelerator"}) json_string = json.dumps(save_dict, indent=2, sort_keys=True) + "\n" with open(json_path, "w", encoding="utf-8") as f: f.write(json_string) @classmethod def load_from_json(cls, accelerator: Accelerator, json_path: str): """Create an instance from the content of `json_path`.""" # load everything except accelerator with open(json_path, encoding="utf-8") as f: text = f.read() return cls(accelerator=accelerator, **json.loads(text)) @torch.no_grad() def get_global_statistics( accelerator, xs: torch.Tensor, mask=None, device="cpu" ) -> tuple[torch.Tensor, torch.Tensor, int]: """ Computes element-wise mean and variance of the tensor across processes. Reference: https://github.com/OpenLMLab/MOSS-RLHF/blob/40b91eb2f2b71b16919addede0341d2bef70825d/utils.py#L57C1-L73C75 """ xs = xs.to(accelerator.device) sum_and_count = torch.tensor([xs.sum(), (xs.numel() if mask is None else mask.sum())], device=xs.device) sum_and_count = accelerator.reduce(sum_and_count) global_sum, count = sum_and_count global_mean = global_sum / count sum_var = torch.sum(((xs - global_mean) ** 2).mul(1 if mask is None else mask)) sum_var = accelerator.reduce(sum_var) global_var = sum_var / count return global_mean.to(device), global_var.to(device), count.item() def compute_accuracy(eval_pred: EvalPrediction) -> dict[str, float]: predictions, labels = eval_pred if predictions.ndim == 3: # Token classification task. Shapes are (batch_size, seq_len, num_labels) and (batch_size, seq_len) # Used to compute the accuracy in the prm_trainer. predictions = np.argmax(predictions, axis=2) # Flatten the predictions and labels to remove the ignored tokens. predictions = np.array( [p for prediction, label in zip(predictions, labels) for (p, lbl) in zip(prediction, label) if lbl != -100] ) labels = np.array([lbl for label in labels for lbl in label if lbl != -100]) else: # Here, predictions is rewards_chosen and rewards_rejected. Shapes are (batch_size, 2) and (batch_size,) # We want to see how much of the time rewards_chosen > rewards_rejected. 
equal_mask = predictions[:, 0] == predictions[:, 1] equal_predictions_count = int(equal_mask.sum()) if equal_predictions_count > 0: warnings.warn( f"There are {equal_predictions_count} out of {len(predictions[:, 0])} instances where the predictions " "for both options are equal. These instances are ignored in the accuracy computation.", UserWarning, ) # Filter out equal predictions predictions = predictions[~equal_mask] labels = labels[~equal_mask] # Use the remaining predictions for accuracy calculation predictions = np.argmax(predictions, axis=1) accuracy = np.array(predictions == labels, dtype=float).mean().item() return {"accuracy": accuracy} def pad_to_length(tensor: torch.Tensor, length: int, pad_value: Union[int, float], dim: int = -1) -> torch.Tensor: if tensor.size(dim) >= length: return tensor else: pad_size = list(tensor.shape) pad_size[dim] = length - tensor.size(dim) return torch.cat( [ tensor, pad_value * torch.ones(*pad_size, dtype=tensor.dtype, device=tensor.device), ], dim=dim, ) def disable_dropout_in_model(model: torch.nn.Module) -> None: for module in model.modules(): if isinstance(module, torch.nn.Dropout): module.p = 0 def exact_div(a, b, custom_error_message=""): q = a // b if a != q * b: raise ValueError(f"{custom_error_message}, inexact division: {a} / {b} = {a / b}") return q # copied from https://github.com/kvablack/ddpo-pytorch/blob/main/ddpo_pytorch/stat_tracking.py#L5 class PerPromptStatTracker: r""" Class for tracking statistics per prompt. Mainly used to calculate advantage for the DPPO algorithm Args: buffer_size (`int`): Size of the buffer to keep for each prompt. min_count (`int`): Minimum number of samples to keep in the buffer before calculating the mean and std. """ def __init__(self, buffer_size, min_count): self.buffer_size = buffer_size self.min_count = min_count self.stats = {} def update(self, prompts, rewards): prompts = np.array(prompts) rewards = np.array(rewards) unique = np.unique(prompts) advantages = np.empty_like(rewards) for prompt in unique: prompt_rewards = rewards[prompts == prompt] if prompt not in self.stats: self.stats[prompt] = deque(maxlen=self.buffer_size) self.stats[prompt].extend(prompt_rewards) if len(self.stats[prompt]) < self.min_count: mean = np.mean(rewards) std = np.std(rewards) + 1e-6 else: mean = np.mean(self.stats[prompt]) std = np.std(self.stats[prompt]) + 1e-6 advantages[prompts == prompt] = (prompt_rewards - mean) / std return advantages def get_stats(self): return {k: {"mean": np.mean(v), "std": np.std(v), "count": len(v)} for k, v in self.stats.items()} def peft_module_casting_to_bf16(model): for name, module in model.named_modules(): if isinstance(module, torch.nn.LayerNorm) or "norm" in name: module = module.to(torch.float32) elif any(x in name for x in ["lm_head", "embed_tokens", "wte", "wpe"]): if hasattr(module, "weight"): if module.weight.dtype == torch.float32: module = module.to(torch.bfloat16) def get_quantization_config(model_args: ModelConfig) -> Optional[BitsAndBytesConfig]: if model_args.load_in_4bit: quantization_config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_compute_dtype=model_args.torch_dtype, # For consistency with model weights, we use the same value as `torch_dtype` bnb_4bit_quant_type=model_args.bnb_4bit_quant_type, bnb_4bit_use_double_quant=model_args.use_bnb_nested_quant, bnb_4bit_quant_storage=model_args.torch_dtype, ) elif model_args.load_in_8bit: quantization_config = BitsAndBytesConfig( load_in_8bit=True, ) else: quantization_config = None return quantization_config def 
get_kbit_device_map() -> Optional[dict[str, int]]: if is_torch_xpu_available(): return {"": f"xpu:{PartialState().local_process_index}"} elif torch.cuda.is_available(): return {"": PartialState().local_process_index} else: return None def get_peft_config(model_args: ModelConfig) -> "Optional[PeftConfig]": if model_args.use_peft is False: return None if not is_peft_available(): raise ValueError( "You need to have PEFT library installed in your environment, make sure to install `peft`. " "Make sure to run `pip install -U peft`." ) peft_config = LoraConfig( task_type=model_args.lora_task_type, r=model_args.lora_r, target_modules=model_args.lora_target_modules, lora_alpha=model_args.lora_alpha, lora_dropout=model_args.lora_dropout, bias="none", use_rslora=model_args.use_rslora, modules_to_save=model_args.lora_modules_to_save, ) return peft_config def get_exp_cap(value, decimal=4): """ Get the exponent cap of a value. This is used to cap the exponent of a value to avoid overflow. The formula is : log(value.dtype.max) E.g. For float32 data type, the maximum exponent value is 88.7228 to 4 decimal points. ``` Args: value (`torch.Tensor`): The input tensor to obtain the data type decimal (`int`): The number of decimal points of the output exponent cap. eg: direct calling exp(log(torch.float32.max)) will result in inf so we cap the exponent to 88.7228 to avoid overflow. """ vdtype_max = torch.zeros([1]).to(value.dtype) + torch.finfo(value.dtype).max vdtype_log_max = torch.log(vdtype_max).to(value.device) return torch.floor(vdtype_log_max * 10**decimal) / 10**decimal if decimal > 0 else vdtype_log_max def cap_exp(value, cap=-1): # Cap the exponent value below the upper-bound to avoid overflow, before calling torch.exp cap = get_exp_cap(value) if cap < 0 else cap return torch.exp(torch.clamp(value, max=cap)) def print_rich_table(df: pd.DataFrame) -> Table: console = Console() table = Table(show_lines=True) for column in df.columns: table.add_column(column) for _, row in df.iterrows(): table.add_row(*row.astype(str).tolist()) console.print(table) SIMPLE_SFT_CHAT_TEMPLATE = "{% for message in messages %}{{' ' + message['content']}}{% endfor %}{{ eos_token }}" # SIMPLE_SFT_CHAT_TEMPLATE simply ends things with an EOS token, this helps the SFT model learn to end the completions with EOS tokens SIMPLE_CHAT_TEMPLATE = "{% for message in messages %}{{message['role'].capitalize() + ': ' + message['content'] + '\n\n'}}{% endfor %}{% if add_generation_prompt %}{{ 'Assistant:' }}{% endif %}" @dataclass class OnlineTrainerState(TrainerState): episode: int = 0 @dataclass class OnPolicyConfig(TrainingArguments): r""" Base configuration class for on-policy trainers. Using [`~transformers.HfArgumentParser`] we can turn this class into [argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the command line. Parameters: run_name (`str` or `None`, *optional*, defaults to `None`): Name of the run. dataset_num_proc (`int` or `None`, *optional*, defaults to `None`): Number of processes to use for processing the dataset. num_mini_batches (`int`, *optional*, defaults to `1`): Number of minibatches to split a batch into. total_episodes (`int` or `None`, *optional*, defaults to `None`): Total number of episodes in the dataset. local_rollout_forward_batch_size (`int`, *optional*, defaults to `64`): Per rank no grad forward pass in the rollout phase. 
num_sample_generations (`int`, *optional*, defaults to `10`): Number of debugging samples generations (i.e., `generate_completions` calls) throughout training. response_length (`int`, *optional*, defaults to `53`): Length of the response. stop_token (`str` or `None`, *optional*, defaults to `None`): Specifies the stop token to use for text generation. This parameter is mutually exclusive with `stop_token_id`. - `None`: No stop token is applied, unless `stop_token_id` is specified. - `'eos'`: Uses the tokenizer's `eos_token`. stop_token_id (`int` or `None`, *optional*, defaults to `None`): Specifies the ID of the stop token to use for text generation. If `None`, no stop token ID is applied, unless `stop_token` is specified. This parameter is mutually exclusive with `stop_token`. temperature (`float`, *optional*, defaults to `0.7`): Sampling temperature. missing_eos_penalty (`float` or `None`, *optional*, defaults to `None`): Penalty applied to the score when the model fails to generate an EOS token. This is useful to encourage to generate completions shorter than the maximum length (`max_new_tokens`). The penalty must be a positive value. sft_model_path (`str`, *optional*, defaults to `"EleutherAI/pythia-160m"`): Path to the SFT model. world_size (`int` or `None`, *optional*, defaults to `None`): Number of processes (GPUs) to use for the training. num_total_batches (`int` or `None`, *optional*, defaults to `None`): Number of total batches to train. micro_batch_size (`int` or `None`, *optional*, defaults to `None`): Micro batch size across devices (HF's `per_device_train_batch_size` * `world_size`). local_batch_size (`int` or `None`, *optional*, defaults to `None`): Batch size per GPU (HF's `per_device_train_batch_size` * `gradient_accumulation_steps`). batch_size (`int` or `None`, *optional*, defaults to `None`): Batch size across devices (HF's `per_device_train_batch_size` * `world_size` * `gradient_accumulation_steps`). local_mini_batch_size (`int` or `None`, *optional*, defaults to `None`): Mini batch size per GPU. mini_batch_size (`int` or `None`, *optional*, defaults to `None`): Mini batch size across GPUs. push_to_hub (`bool`, *optional*, defaults to `False`): Whether to push the model to the Hub after training. """ run_name: Optional[str] = field( default=None, metadata={"help": "Name of the run."}, ) dataset_num_proc: Optional[int] = field( default=None, metadata={"help": "Number of processes to use for processing the dataset."}, ) num_mini_batches: int = field( default=1, metadata={"help": "Number of minibatches to split a batch into."}, ) total_episodes: Optional[int] = field( default=None, metadata={"help": "Total number of episodes in the dataset."}, ) local_rollout_forward_batch_size: int = field( default=64, metadata={"help": "Per rank no grad forward pass in the rollout phase."}, ) num_sample_generations: int = field( default=10, metadata={ "help": "Number of debugging samples generations (i.e., `generate_completions` calls) throughout training." }, ) response_length: int = field( default=53, metadata={"help": "Length of the response."}, ) stop_token: Optional[Literal["eos"]] = field( default=None, metadata={ "help": "Specifies the stop token to use for text generation. This parameter is mutually exclusive with " "`stop_token_id`." }, ) stop_token_id: Optional[int] = field( default=None, metadata={ "help": "Specifies the ID of the stop token to use for text generation. If `None`, no stop token ID is " "applied, unless `stop_token` is specified. 
This parameter is mutually exclusive with `stop_token`." }, ) temperature: float = field( default=0.7, metadata={"help": "Sampling temperature."}, ) missing_eos_penalty: Optional[float] = field( default=None, metadata={ "help": "Penalty applied to the score when the model fails to generate an EOS token. This is useful to " "encourage to generate completions shorter than the maximum length (`max_new_tokens`). The penalty must be " "a positive value." }, ) sft_model_path: str = field( default="EleutherAI/pythia-160m", metadata={"help": "Path to the SFT model."}, ) world_size: Optional[int] = field( default=None, metadata={"help": "Number of processes (GPUs) to use for the training."}, ) num_total_batches: Optional[int] = field( default=None, metadata={"help": "Number of total batches to train."}, ) micro_batch_size: Optional[int] = field( default=None, metadata={"help": "Micro batch size across devices (HF's `per_device_train_batch_size` * `world_size`)."}, ) local_batch_size: Optional[int] = field( default=None, metadata={"help": "Batch size per GPU (HF's `per_device_train_batch_size` * `gradient_accumulation_steps`)."}, ) batch_size: Optional[int] = field( default=None, metadata={ "help": "Batch size across devices (HF's `per_device_train_batch_size` * `world_size` * " "`gradient_accumulation_steps`)." }, ) local_mini_batch_size: Optional[int] = field( default=None, metadata={"help": "Mini batch size per GPU."}, ) mini_batch_size: Optional[int] = field( default=None, metadata={"help": "Mini batch size across GPUs."}, ) push_to_hub: bool = field( default=False, metadata={"help": "Whether to push the model to the Hub after training."}, ) def first_true_indices(bools: torch.Tensor, dtype=torch.long): """ Takes an N-dimensional bool tensor and returns an (N-1)-dimensional tensor of integers giving the position of the first True in each "row". Returns the length of the rows (bools.size(-1)) if no element is True in a given row. Args: bools (`torch.Tensor`): An N-dimensional boolean tensor. dtype (`torch.dtype`, optional): The desired data type of the output tensor. Defaults to `torch.long`. Returns: `torch.Tensor`: An (N-1)-dimensional tensor of integers indicating the position of the first True in each row. If no True value is found in a row, returns the length of the row. """ row_len = bools.size(-1) zero_or_index = row_len * (~bools).type(dtype) + torch.arange(row_len, dtype=dtype, device=bools.device) return torch.min(zero_or_index, dim=-1).values def get_reward( model: torch.nn.Module, query_responses: torch.Tensor, pad_token_id: int, context_length: int ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """ Computes the reward logits and the rewards for a given model and query responses. Args: model (`torch.nn.Module`): The model used to compute the reward logits. query_responses (`torch.Tensor`): The tensor containing the query responses. pad_token_id (`int`): The token ID representing the pad token. context_length (`int`): The length of the context in the query responses. Returns: tuple: - `reward_logits` (`torch.Tensor`): The logits for the reward model. - `final_rewards` (`torch.Tensor`): The final rewards for each query response. - `sequence_lengths` (`torch.Tensor`): The lengths of the sequences in the query responses. 
""" attention_mask = query_responses != pad_token_id position_ids = attention_mask.cumsum(1) - attention_mask.long() # exclusive cumsum lm_backbone = getattr(model, model.base_model_prefix) input_ids = torch.masked_fill(query_responses, ~attention_mask, 0) output = lm_backbone( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, return_dict=True, output_hidden_states=True, use_cache=False, # otherwise mistral-based RM would error out ) reward_logits = model.score(output.hidden_states[-1]) sequence_lengths = first_true_indices(query_responses[:, context_length:] == pad_token_id) - 1 + context_length # https://github.com/huggingface/transformers/blob/dc68a39c8111217683bf49a4912d0c9018bab33d/src/transformers/models/gpt2/modeling_gpt2.py#L1454 return ( reward_logits, reward_logits[ torch.arange(reward_logits.size(0), device=reward_logits.device), sequence_lengths, ].squeeze(-1), sequence_lengths, ) def forward( model: torch.nn.Module, query_responses: torch.Tensor, pad_token_id: int, ) -> torch.nn.Module: """ Performs a forward pass through the model with the given query responses and pad token ID. Args: model (`torch.nn.Module`): The model to perform the forward pass. query_responses (`torch.Tensor`): The tensor containing the query responses. pad_token_id (`int`): The token ID representing the pad token. Returns: `torch.nn.Module`: The output of the model, including hidden states. """ attention_mask = query_responses != pad_token_id position_ids = attention_mask.cumsum(1) - attention_mask.long() input_ids = torch.masked_fill(query_responses, ~attention_mask, 0) return model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, return_dict=True, output_hidden_states=True, ) def prepare_deepspeed( model: torch.nn.Module, per_device_train_batch_size: int, fp16: bool = False, bf16: bool = False ): """ Prepares the model for training with DeepSpeed (both for stage 2 and 3), configuring the appropriate settings based on the model and batch size. Args: model (`torch.nn.Module`): The model to be prepared for DeepSpeed training. per_device_train_batch_size (`int`): The training batch size per device. Returns: `torch.nn.Module`: The model initialized and configured with DeepSpeed for training. 
""" import deepspeed deepspeed_plugin = AcceleratorState().deepspeed_plugin config_kwargs = deepspeed_plugin.deepspeed_config if config_kwargs["zero_optimization"]["stage"] != 3: config_kwargs["train_micro_batch_size_per_gpu"] = per_device_train_batch_size config_kwargs = { "train_micro_batch_size_per_gpu": config_kwargs["train_micro_batch_size_per_gpu"], "prescale_gradients": False, "wall_clock_breakdown": False, } if bf16: config_kwargs["bf16"] = {"enabled": True} elif fp16: config_kwargs["fp16"] = {"enabled": True} else: if hasattr(model, "config"): hidden_size = ( max(model.config.hidden_sizes) if getattr(model.config, "hidden_sizes", None) else getattr(model.config, "hidden_size", None) ) if hidden_size is not None and config_kwargs["zero_optimization"]["stage"] == 3: # Note that `stage3_prefetch_bucket_size` can produce DeepSpeed messages like: `Invalidate trace cache @ step 0: expected module 1, but got module 0` # This is expected and is not an error, see: https://github.com/microsoft/DeepSpeed/discussions/4081 config_kwargs.update( { "zero_optimization.reduce_bucket_size": hidden_size * hidden_size, "zero_optimization.stage3_param_persistence_threshold": 10 * hidden_size, "zero_optimization.stage3_prefetch_bucket_size": 0, } ) model, *_ = deepspeed.initialize(model=model, config=config_kwargs) model.eval() return model def truncate_response(stop_token_id: int, pad_token_id: int, responses: torch.Tensor): """ Truncates the responses at the first occurrence of the stop token, filling the rest with pad tokens. Args: stop_token_id (`int`): The token ID representing the stop token where truncation occurs. pad_token_id (`int`): The token ID representing the pad token used to fill the truncated responses. responses (`torch.Tensor`): The tensor containing the responses to be truncated. Returns: `torch.Tensor`: The truncated responses tensor with pad tokens filled after the stop token. """ trunc_idxs = first_true_indices(responses == stop_token_id).unsqueeze(-1) new_size = [1] * (len(responses.size()) - 1) + [responses.shape[1]] idxs = torch.arange(responses.shape[1], device=responses.device).view(*new_size) postprocessed_responses = torch.masked_fill(responses, idxs > trunc_idxs, pad_token_id) return postprocessed_responses def generate( lm_backbone: torch.nn.Module, queries: torch.Tensor, pad_token_id: int, generation_config: GenerationConfig ) -> tuple[torch.Tensor, torch.Tensor]: """ Generates sequences from the language model backbone in a way that does not affect padding tokens. Args: lm_backbone (`torch.nn.Module`): The language model backbone used for generation. queries (`torch.Tensor`): The tensor containing the input queries. pad_token_id (`int`): The token ID representing the pad token. generation_config (`GenerationConfig`): The configuration for the generation process. Returns: tuple: - `generated_sequences` (`torch.Tensor`): The concatenated tensor of input queries and generated sequences. - `logits` (`torch.Tensor`): The logits output from the generation process. 
""" context_length = queries.shape[1] attention_mask = queries != pad_token_id input_ids = torch.masked_fill(queries, ~attention_mask, 0) output = lm_backbone.generate( input_ids=input_ids, attention_mask=attention_mask, # position_ids=attention_mask.cumsum(1) - attention_mask.long(), # not needed: already adjusted in generations # https://github.com/huggingface/transformers/blob/ac33aeeeee2a7a89b89c93c2962e6feb90daef0a/src/transformers/models/gpt2/modeling_gpt2.py#L1227-L1250 generation_config=generation_config, return_dict_in_generate=True, output_scores=True, ) logits = torch.stack(output.scores, 1) return torch.cat((queries, output.sequences[:, context_length:]), dim=1), logits @torch.no_grad() def batch_generation( model: torch.nn.Module, queries: torch.Tensor, local_rollout_forward_batch_size: int, pad_token_id: int, generation_config: GenerationConfig, ): query_responses = [] logitss = [] batch_size = queries.shape[0] for i in range(0, batch_size, local_rollout_forward_batch_size): query = queries[i : i + local_rollout_forward_batch_size] query_response, logits = generate( model, query, pad_token_id, generation_config, ) query_responses.append(query_response) logitss.append(logits) # padding tensors padded_query_responses = pad(query_responses, padding_value=pad_token_id, padding_side="right") padded_logitss = pad(logitss, padding_value=0, padding_side="right") # reshaping padded_query_responses = padded_query_responses.view(-1, padded_query_responses.shape[-1])[:batch_size] padded_logitss = padded_logitss.view(-1, *padded_logitss.shape[2:])[:batch_size] return padded_query_responses, padded_logitss def add_bos_token_if_needed( bos_token_id: Optional[int], prompt_len_input_ids: int, prompt_tokens: dict[str, list[int]], chosen_prompt_len_input_ids: int, chosen_tokens: dict[str, list[int]], rejected_prompt_len_input_ids: int, rejected_tokens: dict[str, list[int]], ): if bos_token_id is not None: if prompt_len_input_ids == 0 or bos_token_id != prompt_tokens["prompt_input_ids"][0]: prompt_tokens["prompt_input_ids"] = [bos_token_id] + prompt_tokens["prompt_input_ids"] prompt_tokens["prompt_attention_mask"] = [1] + prompt_tokens["prompt_attention_mask"] if chosen_prompt_len_input_ids == 0 or bos_token_id != chosen_tokens["prompt_input_ids"][0]: chosen_tokens["prompt_input_ids"] = [bos_token_id] + chosen_tokens["prompt_input_ids"] chosen_tokens["prompt_attention_mask"] = [1] + chosen_tokens["prompt_attention_mask"] if rejected_prompt_len_input_ids == 0 or bos_token_id != rejected_tokens["prompt_input_ids"][0]: rejected_tokens["prompt_input_ids"] = [bos_token_id] + rejected_tokens["prompt_input_ids"] rejected_tokens["prompt_attention_mask"] = [1] + rejected_tokens["prompt_attention_mask"] return prompt_tokens, chosen_tokens, rejected_tokens def add_eos_token_if_needed( eos_token_id: int, chosen_tokens: dict[str, list[int]], rejected_tokens: dict[str, list[int]] ): if len(chosen_tokens["input_ids"]) == 0 or eos_token_id != chosen_tokens["input_ids"][-1]: chosen_tokens["input_ids"].append(eos_token_id) chosen_tokens["attention_mask"].append(1) if len(rejected_tokens["input_ids"]) == 0 or eos_token_id != rejected_tokens["input_ids"][-1]: rejected_tokens["input_ids"].append(eos_token_id) rejected_tokens["attention_mask"].append(1) return chosen_tokens, rejected_tokens def truncate_right( input_ids: torch.Tensor, stop_token_id: int, pad_token_id: int ) -> tuple[torch.Tensor, torch.Tensor]: """ Truncates the input tensor from the right side after the first occurrence of the stop token. 
Args: input_ids (`torch.Tensor`): The tensor containing the responses to be truncated stop_token_id (`int`): The token ID representing the stop token where truncation occurs pad_token_id (`int`): The token ID representing the pad token used to fill the truncated responses Returns: tuple: - `output_ids` (`torch.Tensor`): The truncated responses tensor with pad tokens filled after the stop token - `mask` (`torch.Tensor`): The mask tensor to indicate the padding tokens """ trunc_idxs = first_true_indices(input_ids == stop_token_id).unsqueeze(-1) new_size = [1] * (len(input_ids.size()) - 1) + [input_ids.shape[1]] idxs = torch.arange(input_ids.shape[1], device=input_ids.device).view(*new_size) output_ids = torch.masked_fill(input_ids, idxs > trunc_idxs, pad_token_id) mask = torch.masked_fill(torch.ones_like(input_ids), idxs > trunc_idxs, 0) return output_ids, mask def empty_cache() -> None: """Empties the cache of the available torch device. This function checks for the availability of different torch devices (XPU, MLU, NPU, CUDA) and empties the cache of the first available device it finds. If none of the specific devices are available, it defaults to emptying the CUDA cache. """ if is_torch_xpu_available(): torch.xpu.empty_cache() elif is_torch_mlu_available(): torch.mlu.empty_cache() elif is_torch_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() def decode_and_strip_padding(inputs: torch.Tensor, tokenizer: PreTrainedTokenizerBase) -> list[str]: """ Decodes the input tensor and strips the padding tokens. Args: inputs (`torch.Tensor`): The input tensor to be decoded. tokenizer (`transformers.PreTrainedTokenizerBase`): The tokenizer used to decode the input tensor. Returns: `list[str]`: The list of decoded strings with padding tokens stripped. """ decoded = tokenizer.batch_decode(inputs, skip_special_tokens=False) return [d.replace(tokenizer.pad_token, "") for d in decoded] def generate_model_card( base_model: Optional[str], model_name: str, hub_model_id: str, dataset_name: Optional[str], tags: list[str], wandb_url: Optional[str], trainer_name: str, trainer_citation: Optional[str] = None, paper_title: Optional[str] = None, paper_id: Optional[str] = None, comet_url: Optional[str] = None, ) -> ModelCard: """ Generate a `ModelCard` from a template. Args: base_model (`str` or `None`): Base model name. model_name (`str`): Model name. hub_model_id (`str`): Hub model ID as `username/model_id`. dataset_name (`str` or `None`): Dataset name. tags (`list[str]`): Tags. wandb_url (`str` or `None`): Weights & Biases run URL. comet_url (`str` or `None`): Comet experiment URL. trainer_name (`str`): Trainer name. trainer_citation (`str` or `None`, defaults to `None`): Trainer citation as a BibTeX entry. paper_title (`str` or `None`, defaults to `None`): Paper title. paper_id (`str` or `None`, defaults to `None`): ArXiv paper ID as `YYMM.NNNNN`. Returns: `ModelCard`: A ModelCard object. 
""" card_data = ModelCardData( base_model=base_model, datasets=dataset_name, library_name="transformers", licence="license", model_name=model_name, tags=["generated_from_trainer", *tags], ) card = ModelCard.from_template( card_data, template_path=str(pkg_resources.files("trl").joinpath("templates/lm_model_card.md")), base_model=base_model, model_name=model_name, hub_model_id=hub_model_id, dataset_name=dataset_name, wandb_url=wandb_url, comet_url=comet_url, trainer_name=trainer_name, trainer_citation=trainer_citation, paper_title=paper_title, paper_id=paper_id, trl_version=version("trl"), transformers_version=version("transformers"), pytorch_version=version("torch"), datasets_version=version("datasets"), tokenizers_version=version("tokenizers"), ) return card def get_comet_experiment_url() -> Optional[str]: """ If Comet integration is enabled, return the URL of the current Comet experiment; otherwise, return `None`. """ if not is_comet_available(): return None if comet_ml.get_running_experiment() is not None: return comet_ml.get_running_experiment().url return None def log_table_to_comet_experiment(name: str, table: pd.DataFrame) -> None: """ If Comet integration is enabled logs a table to the Comet experiment if it is currently running. Args: name (`str`): Table name. table (`pd.DataFrame`): The Pandas DataFrame containing the table to log. """ if not is_comet_available(): raise ModuleNotFoundError("The comet-ml is not installed. Please install it first: pip install comet-ml") experiment = comet_ml.get_running_experiment() if experiment is not None: experiment.log_table(tabular_data=table, filename=name) def flush_left(mask: torch.Tensor, *tensors: torch.Tensor) -> tuple[torch.Tensor, ...]: """ Shift non-zero elements in the mask and corresponding tensors to the left. This function operates on a binary mask and any number of additional tensors with the same dimensions as the mask. For each row, non-zero values are shifted to the leftmost positions. Then, columns that contain only zeros across all rows are truncated from the mask and tensors. Visually, this operation can be represented as follows: ``` [[0, 0, x, x, x, x], -> [[x, x, x, x], [0, x, x, x, 0, 0]] [x, x, x, 0]] ``` Args: mask (`torch.Tensor`): 2D tensor (binary mask) with shape `(N, M)`. *tensors (`torch.Tensor`) One or more 2D tensors with the same shape as `mask`. These tensors will be processed alongside `mask`, with non-zero values shifted and excess zero columns truncated in the same manner. Returns: `torch.Tensor`: Updated binary mask with non-zero values flushed to the left and trailing zero columns removed. `*torch.Tensor` Updated tensors, processed in the same way as the mask. Example: ```python >>> mask = torch.tensor([[0, 0, 1, 1, 1], ... [0, 1, 1, 0, 0]]) >>> tensor = torch.tensor([[9, 9, 2, 3, 4], ... 
[9, 5, 6, 9, 9]]) >>> new_mask, new_tensor = flush_left(mask, tensor) >>> print(new_mask) tensor([[1, 1, 1], [1, 1, 0]]) >>> print(new_tensor) tensor([[2, 3, 4], [5, 6, 0]]) ``` """ # Create copy of mask and tensors mask = mask.clone() tensors = [t.clone() for t in tensors] # Shift non-zero values to the left for i in range(mask.size(0)): first_one_idx = torch.nonzero(mask[i])[0].item() mask[i] = torch.roll(mask[i], shifts=-first_one_idx) for tensor in tensors: tensor[i] = torch.roll(tensor[i], shifts=-first_one_idx) # Get the first column idx that is all zeros and remove every column after that empty_cols = torch.sum(mask, dim=0) == 0 first_empty_col = torch.nonzero(empty_cols)[0].item() if empty_cols.any() else mask.size(1) mask = mask[:, :first_empty_col] for i, tensor in enumerate(tensors): tensors[i] = tensor[:, :first_empty_col] if not tensors: return mask else: return mask, *tensors def compute_token_accuracy(logits: torch.Tensor, labels: torch.Tensor, ignore_index: int = -100) -> float: """ Compute the mean token accuracy. """ # Get predictions predictions = logits.argmax(dim=-1) # Create mask for non-padding tokens (assuming pad_token_id is ignore_index) mask = labels != ignore_index # Calculate accuracy only on non-padding tokens correct_predictions = (predictions == labels) & mask total_tokens = mask.sum() correct_tokens = correct_predictions.sum() # Calculate accuracy accuracy = correct_tokens.item() / total_tokens.item() if total_tokens > 0 else 0.0 return accuracy def selective_log_softmax(logits, index): """ A memory-efficient implementation of the common `log_softmax -> gather` operation. This function is equivalent to the following naive implementation: ```python logps = torch.gather(logits.log_softmax(-1), dim=-1, index=index.unsqueeze(-1)).squeeze(-1) ``` Args: logits (`torch.Tensor`): Logits tensor of shape `(..., num_classes)`. index (`torch.Tensor`): Index tensor of shape `(...)`, specifying the positions to gather from the log-softmax output. Returns: `torch.Tensor`: Gathered log probabilities with the same shape as `index`. """ if logits.dtype in [torch.float32, torch.float64]: selected_logits = torch.gather(logits, dim=-1, index=index.unsqueeze(-1)).squeeze(-1) # loop to reduce peak mem consumption logsumexp_values = torch.stack([torch.logsumexp(lg, dim=-1) for lg in logits]) per_token_logps = selected_logits - logsumexp_values # log_softmax(x_i) = x_i - logsumexp(x) else: # logsumexp approach is unstable with bfloat16, fall back to slightly less efficent approach per_token_logps = [] for row_logits, row_labels in zip(logits, index): # loop to reduce peak mem consumption row_logps = F.log_softmax(row_logits, dim=-1) row_per_token_logps = row_logps.gather(dim=-1, index=row_labels.unsqueeze(-1)).squeeze(-1) per_token_logps.append(row_per_token_logps) per_token_logps = torch.stack(per_token_logps) return per_token_logps
trl/trl/trainer/utils.py/0
{ "file_path": "trl/trl/trainer/utils.py", "repo_id": "trl", "token_count": 31293 }
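Several of the helpers above (`truncate_response`, `truncate_right`, `get_reward`) rely on the indexing trick inside `first_true_indices`. Below is a minimal, self-contained sketch of that trick, re-implemented inline so it can be run without importing `trl`; the tensors and values are illustrative only.

```python
import torch

# Sketch of the `first_true_indices` idea: a False entry is pushed past the end
# of its row (row_len + column index), so the row-wise minimum is the position
# of the first True, or row_len when the row contains no True at all.
bools = torch.tensor([[False, True, False],
                      [False, False, False]])
row_len = bools.size(-1)

zero_or_index = row_len * (~bools).long() + torch.arange(row_len)
print(torch.min(zero_or_index, dim=-1).values)  # tensor([1, 3])
```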
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import time import torch import transformers from measures_util import end_measure, log_measures, start_measure from transformers import AutoConfig, AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer from accelerate.utils import compute_module_sizes DEFAULT_MODELS = { "gpt-j-6b": {"is_causal": True, "model": "sgugger/sharded-gpt-j-6B", "tokenizer": "EleutherAI/gpt-j-6B"}, "gpt-neox": {"is_causal": True, "model": "EleutherAI/gpt-neox-20b"}, "opt": {"is_causal": True, "model": "facebook/opt-30b"}, "T0pp": {"is_causal": False, "model": "bigscience/T0pp", "model_revision": "sharded"}, } PROMPTS = [ "Hello, my name is", "Are unicorns real? Unicorns are", "For the first time in several years,", "My name is Julien and I am", "The goal of life is", "Whenever I'm sad, I like to", ] def parse_args(): parser = argparse.ArgumentParser(description="Run and time generations on a big model using Accelerate.") parser.add_argument("model_name", type=str, default=None, help="The name of the model to try.") parser.add_argument( "--tokenizer_name", type=str, default=None, help="The name of the tokenizer (if different from the model." ) parser.add_argument("--is_causal", type=bool, default=None, help="Whether or not the model is causal.") parser.add_argument( "--model_revision", type=str, default=None, help="The revision to use for the model checkpoint." 
) parser.add_argument("--torch_dtype", type=str, default=None, help="The dtype for the model.") parser.add_argument("--disk_offload", action="store_true") args = parser.parse_args() # Sanitize args if args.model_name in DEFAULT_MODELS: defaults = DEFAULT_MODELS[args.model_name] args.model_name = defaults["model"] if args.tokenizer_name is None: args.tokenizer_name = defaults.get("tokenizer", args.model_name) if args.is_causal is None: args.is_causal = defaults["is_causal"] if args.model_revision is None: args.model_revision = defaults.get("model_revision", "main") if args.is_causal is None: raise ValueError("Could not infer the default for `--is_causal`, pass either True or False for it.") if args.tokenizer_name is None: args.tokenizer_name = args.model_name if args.model_revision is None: args.model_revision = "main" return args def main(): transformers.utils.logging.set_verbosity_error() args = parse_args() if args.torch_dtype is None: config = AutoConfig.from_pretrained(args.model_name) torch_dtype = getattr(config, "torch_dtype", torch.float32) else: torch_dtype = getattr(torch, args.torch_dtype) model_cls = AutoModelForCausalLM if args.is_causal else AutoModelForSeq2SeqLM kwargs = { "torch_dtype": torch_dtype, "revision": args.model_revision, } if args.disk_offload: kwargs["offload_folder"] = "tmp_offload" kwargs["offload_state_dict"] = True start_measures = start_measure() model = model_cls.from_pretrained(args.model_name, device_map="auto", **kwargs) end_measures = end_measure(start_measures) log_measures(end_measures, "Model loading") module_sizes = compute_module_sizes(model) device_size = {v: 0 for v in model.hf_device_map.values()} for module, device in model.hf_device_map.items(): device_size[device] += module_sizes[module] message = "\n".join([f"- {device}: {size // 2**20}MiB" for device, size in device_size.items()]) print(f"\nTheoretical use:\n{message}") tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name) start_measures = start_measure() generation_times = [] gen_tokens = [] texts_outs = [] for prompt in PROMPTS: inputs = tokenizer(prompt, return_tensors="pt").to(0) tokens = inputs["input_ids"][0].tolist() before_generate = time.time() outputs = model.generate(inputs["input_ids"]) after_generate = time.time() outputs = outputs[0].tolist() num_gen_tokens = len(outputs) if outputs[: len(tokens)] != tokens else len(outputs) - len(tokens) generation_time = after_generate - before_generate text_out = tokenizer.decode(outputs, skip_special_tokens=True) texts_outs.append(text_out) generation_times.append(generation_time) gen_tokens.append(num_gen_tokens) print(f"Prompt: {prompt}\nGeneration {text_out}\nIn {generation_time:.2f}s for {num_gen_tokens} tokens\n") end_measures = end_measure(start_measures) log_measures(end_measures, "Model generation") generation_times_per_token = [gen / tok for gen, tok in zip(generation_times, gen_tokens)] avg_gen = sum(generation_times_per_token) / len(generation_times) print(f"Average time of generation per token: {avg_gen:.2f}s") print(f"First generation (avg time per token): {generation_times_per_token[0]:.2f}s") avg_gen = sum(generation_times_per_token[1:]) / (len(generation_times_per_token) - 1) print(f"Average time of generation per token (excluding the first): {avg_gen:.2f}s") if __name__ == "__main__": main()
accelerate/benchmarks/big_model_inference/big_model_inference.py/0
{ "file_path": "accelerate/benchmarks/big_model_inference/big_model_inference.py", "repo_id": "accelerate", "token_count": 2241 }
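The benchmark above reports a "theoretical use" per device by summing `compute_module_sizes(model)` over the entries of `model.hf_device_map`. As a hedged illustration of what that utility returns (a toy model here, not the large checkpoints the script targets):

```python
import torch.nn as nn
from accelerate.utils import compute_module_sizes

# Tiny stand-in for the sharded checkpoints loaded by the benchmark.
model = nn.Sequential(nn.Linear(10, 10), nn.ReLU(), nn.Linear(10, 2))

# The result maps every module prefix to its parameter size in bytes;
# the empty prefix "" holds the total for the whole model.
sizes = compute_module_sizes(model)
print(sizes[""])   # total size in bytes
print(sizes["0"])  # size of the first Linear layer only
```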
# Builds GPU docker image of PyTorch specifically
# Uses multi-staged approach to reduce size
# Stage 1
# Use base conda image to reduce time
FROM continuumio/miniconda3:latest AS compile-image

# Specify py version
# Note: DeepSpeed beyond v0.12.6 requires py 3.10
ENV PYTHON_VERSION=3.10
# Install apt libs
RUN apt-get update && \
    apt-get install -y curl git wget && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists*

# Create our conda env
RUN conda create --name accelerate python=${PYTHON_VERSION} ipython jupyter pip
# We don't install pytorch here yet since CUDA isn't available
# instead we use the direct torch wheel
ENV PATH /opt/conda/envs/accelerate/bin:$PATH
# Activate our bash shell
RUN chsh -s /bin/bash
SHELL ["/bin/bash", "-c"]
# Activate the conda env, install mpi4py, and install torch + accelerate
RUN source activate accelerate && conda install -c conda-forge mpi4py
RUN source activate accelerate && \
    python3 -m pip install --no-cache-dir \
    git+https://github.com/huggingface/accelerate#egg=accelerate[testing,test_trackers,deepspeed] \
    --extra-index-url https://download.pytorch.org/whl/cu117

RUN python3 -m pip install --no-cache-dir bitsandbytes

# Stage 2
FROM nvidia/cuda:12.1.0-cudnn8-devel-ubuntu20.04 AS build-image
COPY --from=compile-image /opt/conda /opt/conda
ENV PATH /opt/conda/bin:$PATH

# Install apt libs
RUN apt-get update && \
    apt-get install -y curl git wget && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists*

RUN echo "source activate accelerate" >> ~/.profile

# Activate the virtualenv
CMD ["/bin/bash"]
accelerate/docker/accelerate-gpu-deepspeed/Dockerfile/0
{ "file_path": "accelerate/docker/accelerate-gpu-deepspeed/Dockerfile", "repo_id": "accelerate", "token_count": 560 }
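One way to smoke-test the image built from this Dockerfile is to confirm, inside the `accelerate` conda environment, that the packages it installs can be resolved. This is only an illustrative check derived from the `pip`/`conda` lines above, not part of the image itself:

```python
import importlib.util

# Packages pulled in by the Dockerfile: accelerate (with the deepspeed extra),
# bitsandbytes via pip, and mpi4py via conda-forge.
for pkg in ("accelerate", "deepspeed", "bitsandbytes", "mpi4py"):
    found = importlib.util.find_spec(pkg) is not None
    print(f"{pkg}: {'ok' if found else 'MISSING'}")
```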
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Gradient synchronization PyTorch's distributed module operates by communicating back and forth between all of the GPUs in your system. This communication takes time, and ensuring all processes know the states of each other happens at particular triggerpoints when using the `ddp` module. These triggerpoints are added to the PyTorch model, specifically their `forward()` and `backward()` methods. This happens when the model is wrapped with `DistributedDataParallel`: ```python import torch.nn as nn from torch.nn.parallel import DistributedDataParallel model = nn.Linear(10, 10) ddp_model = DistributedDataParallel(model) ``` In Accelerate this conversion happens automatically when calling [`~Accelerator.prepare`] and passing in your model. ```diff + from accelerate import Accelerator + accelerator = Accelerator() import torch.nn as nn - from torch.nn.parallel import DistributedDataParallel model = nn.Linear(10,10) + model = accelerator.prepare(model) ``` ## The slowdown in gradient accumulation You now understand that PyTorch adds hooks to the `forward` and `backward` method of your PyTorch model when training in a distributed setup. But how does this risk slowing down your code? In DDP (distributed data parallel), the specific order in which processes are performed and ran are expected at specific points and these must also occur at roughly the same time before moving on. The most direct example is when you update model parameters through `optimizer.step()`. Without gradient accumulation, all instances of the model need to have updated their gradients computed, collated, and updated before moving on to the next batch of data. When performing gradient accumulation, you accumulate `n` loss gradients and skip `optimizer.step()` until `n` batches have been reached. As all training processes only need to synchronize by the time `optimizer.step()` is called, without any modification to your training step, this needless inter-process communication can cause a significant slowdown. How can you avoid this overhead? ## Solving the slowdown problem Since you are skipping model parameter updates when training on these batches, their gradients do not need to be synchronized until the point where `optimizer.step()` is actually called. PyTorch cannot automagically tell when you need to do this, but they do provide a tool to help through the [`no_sync`](https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html#torch.nn.parallel.DistributedDataParallel.no_sync) context manager that is added to your model after converting it to DDP. Under this context manager, PyTorch will skip synchronizing the gradients when `.backward()` is called, and the first call to `.backward()` outside this context manager will trigger the synchronization. 
See an example below: ```python ddp_model, dataloader, optimizer = accelerator.prepare(model, dataloader, optimizer) for index, batch in enumerate(dataloader): inputs, targets = batch # Trigger gradient synchronization on the last batch if index != (len(dataloader) - 1): with ddp_model.no_sync(): # Gradients only accumulate outputs = ddp_model(inputs) loss = loss_func(outputs) accelerator.backward(loss) else: # Gradients finally sync outputs = ddp_model(inputs) loss = loss_func(outputs) accelerator.backward(loss) optimizer.step() ``` In Accelerate to make this an API that can be called no matter the training device (though it may not do anything if you are not in a distributed system!), `ddp_model.no_sync` gets replaced with [`~Accelerator.no_sync`] and operates the same way: ```diff ddp_model, dataloader, optimizer = accelerator.prepare(model, dataloader, optimizer) for index, batch in enumerate(dataloader): inputs, targets = batch # Trigger gradient synchronization on the last batch if index != (len(dataloader)-1): - with ddp_model.no_sync(): + with accelerator.no_sync(model): # Gradients only accumulate outputs = ddp_model(inputs) loss = loss_func(outputs, targets) accelerator.backward(loss) else: # Gradients finally sync outputs = ddp_model(inputs) loss = loss_func(outputs) accelerator.backward(loss) optimizer.step() optimizer.zero_grad() ``` As you may expect, the [`~Accelerator.accumulate`] function wraps around this conditional check by keeping track of the current batch number, leaving you with the final gradient accumulation API: ```python ddp_model, dataloader, optimizer = accelerator.prepare(model, dataloader, optimizer) for batch in dataloader: with accelerator.accumulate(model): optimizer.zero_grad() inputs, targets = batch outputs = model(inputs) loss = loss_function(outputs, targets) accelerator.backward(loss) optimizer.step() optimizer.zero_grad() ``` As a result, you should either use *`accelerator.accumulate` or `accelerator.no_sync`* when it comes to API choice. ## Just how much of a slowdown is there, and easy mistakes you can make To set up a realistic example, consider the following setup: * Two single-GPU T4 nodes and one node with two GPUs * Each GPU is a T4, and are hosted on GCP * The script used is a modification of the [NLP Example](https://github.com/muellerzr/timing_experiments/blob/main/baseline.py) script * Batch size per GPU is 16, and gradients are accumulated every 4 steps All scripts are available in [this repository](https://github.com/muellerzr/timing_experiments). If not careful about gradient synchronization and GPU communication, a *large* amount of time can be wasted from when these GPUs communicate to each other during unnecessary periods. By how much? 
Reference: - Baseline: uses no synchronization practices discussed here - `no_sync` improperly: `no_sync` only around the `backward` call, not the `forward` - `no_sync`: using the `no_sync` pattern properly - `accumulate`: using [`~Accelerator.accumulate`] properly Below are the average seconds per batch iterating over 29 batches of data for each setup on both a single node and on the dual-node setup: | | Baseline | `no_sync` improperly | `no_sync` | `accumulate`| | :---------: | :-------: | :------------------: | :-------: | :---------: | | Multi-Node | 2±0.01s | 2.13±0.08s | **0.91±0.11s** | **0.91±0.11s** | | Single Node | 0.50±0.01s | 0.50±0.01s | **0.41±0.015s** | **0.41±0.015s** | As you can see, if you are not careful about how you set up your gradient synchronization, you can get upwards of more than a 2x slowdown during training! If you are worried about making sure everything is done properly, we highly recommend utilizing the [`~Accelerator.accumulate`] function and passing in `gradient_accumulation_steps` or `gradient_accumulation_plugin` to the [`Accelerator`] object so Accelerate can handle this for you. ### `no_sync` requires additional GPU memory when using FSDP Be aware that not syncing gradients can have adverse effects while performing FSDP training. As it has been warned in `torch`, the [`no_sync` context manager for FSDP](https://pytorch.org/docs/stable/fsdp.html#torch.distributed.fsdp.FullyShardedDataParallel.no_sync) will require additional memory. Therefore in memory intensive situations while using FSDP, we recommend to set `sync_each_batch` to `True` in the [`~utils.GradientAccumulationPlugin`] to disable `no_sync`. See the example below where we fine-tune Mixtral (47B parameters) on 8 A100-80GB GPUs. We see that even for a modest `gradient_accumulation_steps=2` we quickly go out-of-memory (OOM) if `no_sync` is enabled. Again, this is due to additional memory overheads due to FSDP's `no_sync`. However, if `no_sync` is disabled via `sync_each_batch=True`, then the memory consumption for `gradient_accumulation_steps=16` reverts to that of `gradient_accumulation_steps=1`. | Model | `no_sync` (accum=1) | `no_sync` (accum=2) | `no_sync` disabled (accum=16) | :-------------: | :-----------------: | :-----------------: | :-----------------: mixtral 8x7B | 69G | OOM | 69G > [!WARNING] > Disabling `no_sync` means there _will be slowdown_ due the extra data syncs, as explained by the earlier sections of this guide.
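
For reference, opting into this behavior only requires passing a configured plugin to the [`Accelerator`]; here is a minimal sketch (the accumulation step count of 16 mirrors the example above and is otherwise arbitrary):

```python
from accelerate import Accelerator
from accelerate.utils import GradientAccumulationPlugin

# Synchronize gradients on every batch so FSDP never enters `no_sync`,
# trading some communication time for the lower memory footprint described above.
plugin = GradientAccumulationPlugin(num_steps=16, sync_each_batch=True)
accelerator = Accelerator(gradient_accumulation_plugin=plugin)
```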
accelerate/docs/source/concept_guides/gradient_synchronization.md/0
{ "file_path": "accelerate/docs/source/concept_guides/gradient_synchronization.md", "repo_id": "accelerate", "token_count": 2834 }
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Kwargs handlers The following objects can be passed to the main [`Accelerator`] to customize how some PyTorch objects related to distributed training or mixed precision are created. ## AutocastKwargs [[autodoc]] AutocastKwargs ## DistributedDataParallelKwargs [[autodoc]] DistributedDataParallelKwargs ## FP8RecipeKwargs [[autodoc]] utils.FP8RecipeKwargs ## ProfileKwargs [[autodoc]] utils.ProfileKwargs ## GradScalerKwargs [[autodoc]] GradScalerKwargs ## InitProcessGroupKwargs [[autodoc]] InitProcessGroupKwargs ## KwargsHandler [[autodoc]] utils.KwargsHandler
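
As a short, hedged example of how these handlers are consumed (the specific flag values are only illustrative), they are passed to the [`Accelerator`] through its `kwargs_handlers` argument:

```python
from datetime import timedelta

from accelerate import Accelerator, DistributedDataParallelKwargs, InitProcessGroupKwargs

# Each handler customizes how the corresponding PyTorch object is created.
ddp_kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)
init_kwargs = InitProcessGroupKwargs(timeout=timedelta(seconds=1800))

accelerator = Accelerator(kwargs_handlers=[ddp_kwargs, init_kwargs])
```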
accelerate/docs/source/package_reference/kwargs.md/0
{ "file_path": "accelerate/docs/source/package_reference/kwargs.md", "repo_id": "accelerate", "token_count": 384 }
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Fully Sharded Data Parallel To accelerate training huge models on larger batch sizes, we can use a fully sharded data parallel model. This type of data parallel paradigm enables fitting more data and larger models by sharding the optimizer states, gradients and parameters. To read more about it and the benefits, check out the [Fully Sharded Data Parallel blog](https://pytorch.org/blog/introducing-pytorch-fully-sharded-data-parallel-api/). We have integrated the latest PyTorch's Fully Sharded Data Parallel (FSDP) training feature. All you need to do is enable it through the config. ## How it works out of the box On your machine(s) just run: ```bash accelerate config ``` and answer the questions asked. This will generate a config file that will be used automatically to properly set the default options when doing ```bash accelerate launch my_script.py --args_to_my_script ``` For instance, here is how you would run `examples/nlp_example.py` (from the root of the repo) with FSDP enabled: ```bash compute_environment: LOCAL_MACHINE debug: false distributed_type: FSDP downcast_bf16: 'no' fsdp_config: fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP fsdp_backward_prefetch_policy: BACKWARD_PRE fsdp_forward_prefetch: false fsdp_cpu_ram_efficient_loading: true fsdp_offload_params: false fsdp_sharding_strategy: FULL_SHARD fsdp_state_dict_type: SHARDED_STATE_DICT fsdp_sync_module_states: true fsdp_transformer_layer_cls_to_wrap: BertLayer fsdp_use_orig_params: true machine_rank: 0 main_training_function: main mixed_precision: bf16 num_machines: 1 num_processes: 2 rdzv_backend: static same_network: true tpu_env: [] tpu_use_cluster: false tpu_use_sudo: false use_cpu: false ``` ```bash accelerate launch examples/nlp_example.py ``` Currently, `Accelerate` supports the following config through the CLI: `fsdp_sharding_strategy`: [1] FULL_SHARD (shards optimizer states, gradients and parameters), [2] SHARD_GRAD_OP (shards optimizer states and gradients), [3] NO_SHARD (DDP), [4] HYBRID_SHARD (shards optimizer states, gradients and parameters within each node while each node has full copy), [5] HYBRID_SHARD_ZERO2 (shards optimizer states and gradients within each node while each node has full copy). For more information, please refer the official [PyTorch docs](https://pytorch.org/docs/stable/fsdp.html#torch.distributed.fsdp.ShardingStrategy). `fsdp_offload_params` : Decides Whether to offload parameters and gradients to CPU `fsdp_auto_wrap_policy`: [1] TRANSFORMER_BASED_WRAP, [2] SIZE_BASED_WRAP, [3] NO_WRAP `fsdp_transformer_layer_cls_to_wrap`: Only applicable for Transformers. 
When using `fsdp_auto_wrap_policy=TRANSFORMER_BASED_WRAP`, a user may provide a comma-separated string of transformer layer class names (case-sensitive) to wrap, e.g., `BertLayer`, `GPTJBlock`, `T5Block`, `BertLayer,BertEmbeddings,BertSelfOutput`. This is important because submodules that share weights (e.g., embedding layers) should not end up in different FSDP wrapped units. Using this policy, wrapping happens for each block containing Multi-Head Attention followed by a couple of MLP layers. Remaining layers including the shared embeddings are conveniently wrapped in same outermost FSDP unit. Therefore, use this for transformer-based models. You can use the `model._no_split_modules` for Transformer models by answering `yes` to `Do you want to use the model's `_no_split_modules` to wrap. It will try to use `model._no_split_modules` when possible. `fsdp_min_num_params`: minimum number of parameters when using `fsdp_auto_wrap_policy=SIZE_BASED_WRAP`. `fsdp_backward_prefetch_policy`: [1] BACKWARD_PRE, [2] BACKWARD_POST, [3] NO_PREFETCH `fsdp_forward_prefetch`: if True, then FSDP explicitly prefetches the next upcoming all-gather while executing in the forward pass. Should only be used for static-graph models since the prefetching follows the first iteration’s execution order. i.e., if the sub-modules' order changes dynamically during the model's execution do not enable this feature. `fsdp_state_dict_type`: [1] FULL_STATE_DICT, [2] LOCAL_STATE_DICT, [3] SHARDED_STATE_DICT `fsdp_use_orig_params`: If True, allows non-uniform `requires_grad` during init, which means support for interspersed frozen and trainable parameters. This setting is useful in cases such as parameter-efficient fine-tuning as discussed in [this post](https://dev-discuss.pytorch.org/t/rethinking-pytorch-fully-sharded-data-parallel-fsdp-from-first-principles/1019). This option also allows one to have multiple optimizer param groups. This should be `True` when creating an optimizer before preparing/wrapping the model with FSDP. `fsdp_cpu_ram_efficient_loading`: Only applicable for Transformers models. If True, only the first process loads the pretrained model checkpoint while all other processes have empty weights. This should be set to False if you experience errors when loading the pretrained Transformers model via `from_pretrained` method. When this setting is True `fsdp_sync_module_states` also must to be True, otherwise all the processes except the main process would have random weights leading to unexpected behaviour during training. For this to work, make sure the distributed process group is initialized before calling Transformers `from_pretrained` method. When using Trainer API, the distributed process group is initialized when you create an instance of `TrainingArguments` class. `fsdp_sync_module_states`: If True, each individually wrapped FSDP unit will broadcast module parameters from rank 0. For additional and more nuanced control, you can specify other FSDP parameters via `FullyShardedDataParallelPlugin`. When creating `FullyShardedDataParallelPlugin` object, pass it the parameters that weren't part of the accelerate config or if you want to override them. The FSDP parameters will be picked based on the accelerate config file or launch command arguments and other parameters that you will pass directly through the `FullyShardedDataParallelPlugin` object will set/override that. 
Below is an example: ```py from accelerate import FullyShardedDataParallelPlugin from torch.distributed.fsdp.fully_sharded_data_parallel import FullOptimStateDictConfig, FullStateDictConfig fsdp_plugin = FullyShardedDataParallelPlugin( state_dict_config=FullStateDictConfig(offload_to_cpu=False, rank0_only=False), optim_state_dict_config=FullOptimStateDictConfig(offload_to_cpu=False, rank0_only=False), ) accelerator = Accelerator(fsdp_plugin=fsdp_plugin) ``` ## Saving and loading The new recommended way of checkpointing when using FSDP models is to use `SHARDED_STATE_DICT` as `StateDictType` when setting up the accelerate config. Below is the code snippet to save using `save_state` utility of accelerate. ```py accelerator.save_state("ckpt") ``` Inspect the checkpoint folder to see model and optimizer as shards per process: ``` ls ckpt # optimizer_0 pytorch_model_0 random_states_0.pkl random_states_1.pkl scheduler.bin cd ckpt ls optimizer_0 # __0_0.distcp __1_0.distcp ls pytorch_model_0 # __0_0.distcp __1_0.distcp ``` To load them back for resuming the training, use the `load_state` utility of accelerate ```py accelerator.load_state("ckpt") ``` When using transformers `save_pretrained`, pass `state_dict=accelerator.get_state_dict(model)` to save the model state dict. Below is an example: ```diff unwrapped_model.save_pretrained( args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save, + state_dict=accelerator.get_state_dict(model), ) ``` ### State Dict `accelerator.get_state_dict` will call the underlying `model.state_dict` implementation using `FullStateDictConfig(offload_to_cpu=True, rank0_only=True)` context manager to get the state dict only for rank 0 and it will be offloaded to CPU. You can then pass `state` into the `save_pretrained` method. There are several modes for `StateDictType` and `FullStateDictConfig` that you can use to control the behavior of `state_dict`. For more information, see the [PyTorch documentation](https://pytorch.org/docs/stable/fsdp.html). If you choose to use `StateDictType.SHARDED_STATE_DICT`, the weights of the model during `Accelerator.save_state` will be split into `n` files for each sub-split on the model. To merge them back into a single dictionary to load back into the model later after training you can use the `merge_weights` utility: ```py from accelerate.utils import merge_fsdp_weights # Our weights are saved usually in a `pytorch_model_fsdp_{model_number}` folder merge_fsdp_weights("pytorch_model_fsdp_0", "output_path", safe_serialization=True) ``` The final output will then either be saved to `model.safetensors` or `pytorch_model.bin` (if `safe_serialization=False` is passed). This can also be called using the CLI: ```bash accelerate merge-weights pytorch_model_fsdp_0/ output_path ``` ## Mapping between FSDP sharding strategies and DeepSpeed ZeRO Stages * `FULL_SHARD` maps to the DeepSpeed `ZeRO Stage-3`. Shards optimizer states, gradients and parameters. * `SHARD_GRAD_OP` maps to the DeepSpeed `ZeRO Stage-2`. Shards optimizer states and gradients. * `NO_SHARD` maps to `ZeRO Stage-0`. No sharding wherein each GPU has full copy of model, optimizer states and gradients. * `HYBRID_SHARD` maps to `ZeRO++ Stage-3` wherein `zero_hpz_partition_size=<num_gpus_per_node>`. Here, this will shard optimizer states, gradients and parameters within each node while each node has full copy. 
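
If you prefer to select the sharding strategy in code rather than in the config file, it can also be set on the plugin directly. A small sketch, choosing `SHARD_GRAD_OP` (the ZeRO Stage-2 equivalent) purely as an example:

```py
from torch.distributed.fsdp import ShardingStrategy

from accelerate import Accelerator, FullyShardedDataParallelPlugin

# SHARD_GRAD_OP shards optimizer states and gradients, mirroring ZeRO Stage-2.
fsdp_plugin = FullyShardedDataParallelPlugin(sharding_strategy=ShardingStrategy.SHARD_GRAD_OP)
accelerator = Accelerator(fsdp_plugin=fsdp_plugin)
```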
## A few caveats to be aware of - In case of multiple models, pass the optimizers to the prepare call in the same order as corresponding models else `accelerator.save_state()` and `accelerator.load_state()` will result in wrong/unexpected behaviour. - This feature is incompatible with `--predict_with_generate` in the `run_translation.py` script of `Transformers` library. For more control, users can leverage the `FullyShardedDataParallelPlugin`. After creating an instance of this class, users can pass it to the Accelerator class instantiation. For more information on these options, please refer to the PyTorch [FullyShardedDataParallel](https://github.com/pytorch/pytorch/blob/0df2e863fbd5993a7b9e652910792bd21a516ff3/torch/distributed/fsdp/fully_sharded_data_parallel.py#L236) code. <Tip> For those interested in the similarities and differences between FSDP and DeepSpeed, please check out the [concept guide here](../concept_guides/fsdp_and_deepspeed)! </Tip>
accelerate/docs/source/usage_guides/fsdp.md/0
{ "file_path": "accelerate/docs/source/usage_guides/fsdp.md", "repo_id": "accelerate", "token_count": 3344 }
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Originally by jiwooya1000, put together together by sayakpaul. Documentation: https://huggingface.co/docs/diffusers/main/en/training/distributed_inference Run: accelerate launch distributed_image_generation.py --batch_size 8 # Enable memory optimizations for large models like SD3 accelerate launch distributed_image_generation.py --batch_size 8 --low_mem """ import os import time import fire import torch from datasets import load_dataset from diffusers import DiffusionPipeline from tqdm import tqdm from accelerate import PartialState from accelerate.utils import gather_object START_TIME = time.strftime("%Y%m%d_%H%M%S") DTYPE_MAP = {"fp32": torch.float32, "fp16": torch.float16, "bf16": torch.bfloat16} def get_batches(items, batch_size): num_batches = (len(items) + batch_size - 1) // batch_size batches = [] for i in range(num_batches): start_index = i * batch_size end_index = min((i + 1) * batch_size, len(items)) batch = items[start_index:end_index] batches.append(batch) return batches def main( ckpt_id: str = "PixArt-alpha/PixArt-Sigma-XL-2-1024-MS", save_dir: str = "./evaluation/examples", seed: int = 1, batch_size: int = 4, num_inference_steps: int = 20, guidance_scale: float = 4.5, dtype: str = "fp16", low_mem: bool = False, ): pipeline = DiffusionPipeline.from_pretrained(ckpt_id, torch_dtype=DTYPE_MAP[dtype]) save_dir = save_dir + f"_{START_TIME}" parti_prompts = load_dataset("nateraw/parti-prompts", split="train") data_loader = get_batches(items=parti_prompts["Prompt"], batch_size=batch_size) distributed_state = PartialState() if low_mem: pipeline.enable_model_cpu_offload(gpu_id=distributed_state.device.index) else: pipeline = pipeline.to(distributed_state.device) if distributed_state.is_main_process: if not os.path.exists(save_dir): os.makedirs(save_dir) print(f"Directory '{save_dir}' created successfully.") else: print(f"Directory '{save_dir}' already exists.") count = 0 for _, prompts_raw in tqdm(enumerate(data_loader), total=len(data_loader)): input_prompts = [] with distributed_state.split_between_processes(prompts_raw) as prompts: generator = torch.manual_seed(seed) images = pipeline( prompts, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, generator=generator ).images input_prompts.extend(prompts) distributed_state.wait_for_everyone() images = gather_object(images) input_prompts = gather_object(input_prompts) if distributed_state.is_main_process: for image, prompt in zip(images, input_prompts): count += 1 temp_dir = os.path.join(save_dir, f"example_{count}") os.makedirs(temp_dir) prompt = "_".join(prompt.split()) image.save(f"image_{prompt}.png") if distributed_state.is_main_process: print(f">>> Image Generation Finished. Saved in {save_dir}") if __name__ == "__main__": fire.Fire(main)
accelerate/examples/inference/distributed/distributed_image_generation.py/0
{ "file_path": "accelerate/examples/inference/distributed/distributed_image_generation.py", "repo_id": "accelerate", "token_count": 1495 }
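The core distribution pattern in the script above is `PartialState.split_between_processes` followed by `gather_object`. Here is a stripped-down sketch of that pattern, with the diffusion pipeline replaced by a trivial stand-in so it runs on any machine:

```python
from accelerate import PartialState
from accelerate.utils import gather_object

state = PartialState()
prompts = ["a cat", "a dog", "a horse", "a bird"]

# Each process receives a contiguous slice of the prompts.
with state.split_between_processes(prompts) as local_prompts:
    local_results = [p.upper() for p in local_prompts]  # stand-in for pipeline(...)

# Collect the per-process results back onto every rank.
results = gather_object(local_results)
if state.is_main_process:
    print(results)
```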
#!/bin/bash #SBATCH --job-name=multigpu #SBATCH -D . #SBATCH --output=O-%x.%j #SBATCH --error=E-%x.%j #SBATCH --nodes=1 #SBATCH --ntasks-per-node=1 # number of MP tasks #SBATCH --gres=gpu:4 # number of GPUs per node #SBATCH --cpus-per-task=160 # number of cores per task #SBATCH --time=01:59:00 # maximum execution time (HH:MM:SS) ###################### ### Set environment ### ###################### source activateEnvironment.sh export GPUS_PER_NODE=4 ###################### export ACCELERATE_DIR="${ACCELERATE_DIR:-/accelerate}" export SCRIPT="${ACCELERATE_DIR}/examples/complete_nlp_example.py" export SCRIPT_ARGS=" \ --mixed_precision fp16 \ --output_dir ${ACCELERATE_DIR}/examples/output \ --with_tracking \ " accelerate launch --num_processes $GPUS_PER_NODE $SCRIPT $SCRIPT_ARGS
accelerate/examples/slurm/submit_multigpu.sh/0
{ "file_path": "accelerate/examples/slurm/submit_multigpu.sh", "repo_id": "accelerate", "token_count": 355 }
[tool.ruff] line-length = 119 target-version = "py38" [tool.ruff.lint] preview = true extend-select = [ "B009", # static getattr "B010", # static setattr "CPY", # Copyright "E", # PEP8 errors "F", # PEP8 formatting "I", # Import sorting "TID251", # Banned API "UP", # Pyupgrade "W", # PEP8 warnings ] ignore = [ "E501", # Line length (handled by ruff-format) "E741", # Ambiguous variable name "W605", # Invalid escape sequence "UP007", # X | Y type annotations ] [tool.ruff.lint.per-file-ignores] "__init__.py" = [ "F401", # Ignore seemingly unused imports (they're meant for re-export) ] "manim_animations/*" = ["ALL"] [tool.ruff.lint.isort] lines-after-imports = 2 known-first-party = ["accelerate"] [tool.ruff.format] exclude = [ "manim_animations/*" ] [tool.ruff.lint.flake8-tidy-imports.banned-api] "os.getenv".msg = "Use os.environ instead" "os.putenv".msg = "Use os.environ instead" "os.unsetenv".msg = "Use os.environ instead"
accelerate/pyproject.toml/0
{ "file_path": "accelerate/pyproject.toml", "repo_id": "accelerate", "token_count": 416 }
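The `flake8-tidy-imports` banned-API table at the end of the config above is the rule most likely to surprise contributors. As a rough illustration (the environment variable name is just an example), `ruff check` configured this way would flag the first line below and accept the second:

```python
import os

value = os.getenv("ACCELERATE_DEBUG_MODE", "false")       # TID251: "Use os.environ instead"
value = os.environ.get("ACCELERATE_DEBUG_MODE", "false")  # allowed spelling
```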
#!/usr/bin/env python # Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import platform import subprocess import numpy as np import psutil import torch from accelerate import __version__ as version from accelerate.commands.config import default_config_file, load_config_from_file from ..utils import is_mlu_available, is_musa_available, is_npu_available, is_xpu_available def env_command_parser(subparsers=None): if subparsers is not None: parser = subparsers.add_parser("env") else: parser = argparse.ArgumentParser("Accelerate env command") parser.add_argument( "--config_file", default=None, help="The config file to use for the default values in the launching script." ) if subparsers is not None: parser.set_defaults(func=env_command) return parser def env_command(args): pt_version = torch.__version__ pt_cuda_available = torch.cuda.is_available() pt_xpu_available = is_xpu_available() pt_mlu_available = is_mlu_available() pt_musa_available = is_musa_available() pt_npu_available = is_npu_available() accelerate_config = "Not found" # Get the default from the config file. if args.config_file is not None or os.path.isfile(default_config_file): accelerate_config = load_config_from_file(args.config_file).to_dict() # if we can run which, get it command = None bash_location = "Not found" if os.name == "nt": command = ["where", "accelerate"] elif os.name == "posix": command = ["which", "accelerate"] if command is not None: bash_location = subprocess.check_output(command, text=True, stderr=subprocess.STDOUT).strip() info = { "`Accelerate` version": version, "Platform": platform.platform(), "`accelerate` bash location": bash_location, "Python version": platform.python_version(), "Numpy version": np.__version__, "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})", "PyTorch XPU available": str(pt_xpu_available), "PyTorch NPU available": str(pt_npu_available), "PyTorch MLU available": str(pt_mlu_available), "PyTorch MUSA available": str(pt_musa_available), "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB", } if pt_cuda_available: info["GPU type"] = torch.cuda.get_device_name() if pt_mlu_available: info["MLU type"] = torch.mlu.get_device_name() if pt_musa_available: info["MUSA type"] = torch.musa.get_device_name() if pt_npu_available: info["CANN version"] = torch.version.cann print("\nCopy-and-paste the text below in your GitHub issue\n") print("\n".join([f"- {prop}: {val}" for prop, val in info.items()])) print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:") accelerate_config_str = ( "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()]) if isinstance(accelerate_config, dict) else f"\t{accelerate_config}" ) print(accelerate_config_str) info["`Accelerate` configs"] = accelerate_config return info def main() -> int: parser = env_command_parser() args = parser.parse_args() env_command(args) return 0 if __name__ == "__main__": raise 
SystemExit(main())
accelerate/src/accelerate/commands/env.py/0
{ "file_path": "accelerate/src/accelerate/commands/env.py", "repo_id": "accelerate", "token_count": 1464 }
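The same report can presumably be produced without the `accelerate env` CLI by driving the parser defined above directly. The module path below is inferred from the file location in this record, and the empty argument list simply falls back to the default config file:

```python
from accelerate.commands.env import env_command, env_command_parser

parser = env_command_parser()
args = parser.parse_args([])   # no --config_file given, so the default config is used
info = env_command(args)       # prints the copy-pasteable report and returns it as a dict
print(sorted(info))
```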
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import sys import tempfile import torch from .state import AcceleratorState, PartialState from .utils import ( PrecisionType, PrepareForLaunch, are_libraries_initialized, check_cuda_p2p_ib_support, get_gpu_info, is_mps_available, is_torch_version, patch_environment, ) from .utils.constants import ELASTIC_LOG_LINE_PREFIX_TEMPLATE_PYTORCH_VERSION def test_launch(): "Verify a `PartialState` can be initialized." _ = PartialState() def notebook_launcher( function, args=(), num_processes=None, mixed_precision="no", use_port="29500", master_addr="127.0.0.1", node_rank=0, num_nodes=1, rdzv_backend="static", rdzv_endpoint="", rdzv_conf=None, rdzv_id="none", max_restarts=0, monitor_interval=0.1, log_line_prefix_template=None, ): """ Launches a training function, using several processes or multiple nodes if it's possible in the current environment (TPU with multiple cores for instance). <Tip warning={true}> To use this function absolutely zero calls to a CUDA device must be made in the notebook session before calling. If any have been made, you will need to restart the notebook and make sure no cells use any CUDA capability. Setting `ACCELERATE_DEBUG_MODE="1"` in your environment will run a test before truly launching to ensure that none of those calls have been made. </Tip> Args: function (`Callable`): The training function to execute. If it accepts arguments, the first argument should be the index of the process run. args (`Tuple`): Tuple of arguments to pass to the function (it will receive `*args`). num_processes (`int`, *optional*): The number of processes to use for training. Will default to 8 in Colab/Kaggle if a TPU is available, to the number of GPUs available otherwise. mixed_precision (`str`, *optional*, defaults to `"no"`): If `fp16` or `bf16`, will use mixed precision training on multi-GPU. use_port (`str`, *optional*, defaults to `"29500"`): The port to use to communicate between processes when launching a multi-GPU training. master_addr (`str`, *optional*, defaults to `"127.0.0.1"`): The address to use for communication between processes. node_rank (`int`, *optional*, defaults to 0): The rank of the current node. num_nodes (`int`, *optional*, defaults to 1): The number of nodes to use for training. rdzv_backend (`str`, *optional*, defaults to `"static"`): The rendezvous method to use, such as 'static' (the default) or 'c10d' rdzv_endpoint (`str`, *optional*, defaults to `""`): The endpoint of the rdzv sync. storage. rdzv_conf (`Dict`, *optional*, defaults to `None`): Additional rendezvous configuration. rdzv_id (`str`, *optional*, defaults to `"none"`): The unique run id of the job. max_restarts (`int`, *optional*, defaults to 0): The maximum amount of restarts that elastic agent will conduct on workers before failure. monitor_interval (`float`, *optional*, defaults to 0.1): The interval in seconds that is used by the elastic_agent as a period of monitoring workers. 
log_line_prefix_template (`str`, *optional*, defaults to `None`): The prefix template for elastic launch logging. Available from PyTorch 2.2.0. Example: ```python # Assume this is defined in a Jupyter Notebook on an instance with two GPUs from accelerate import notebook_launcher def train(*args): # Your training function here ... notebook_launcher(train, args=(arg1, arg2), num_processes=2, mixed_precision="fp16") ``` """ # Are we in a google colab or a Kaggle Kernel? in_colab = False in_kaggle = False if any(key.startswith("KAGGLE") for key in os.environ.keys()): in_kaggle = True elif "IPython" in sys.modules: in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython()) try: mixed_precision = PrecisionType(mixed_precision.lower()) except ValueError: raise ValueError( f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}." ) if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None): # TPU launch import torch_xla.distributed.xla_multiprocessing as xmp from torch_xla import device_count if len(AcceleratorState._shared_state) > 0: raise ValueError( "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside " "your training function. Restart your notebook and make sure no cells initializes an " "`Accelerator`." ) launcher = PrepareForLaunch(function, distributed_type="XLA") print(f"Launching a training on {device_count()} TPU cores.") xmp.spawn(launcher, args=args, start_method="fork") elif in_colab and get_gpu_info()[1] < 2: # No need for a distributed launch otherwise as it's either CPU or one GPU. if torch.cuda.is_available(): print("Launching training on one GPU.") else: print("Launching training on one CPU.") function(*args) else: if num_processes is None: raise ValueError( "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call." ) if node_rank >= num_nodes: raise ValueError("The node_rank must be less than the number of nodes.") if num_processes > 1: # Multi-GPU launch from torch.distributed.launcher.api import LaunchConfig, elastic_launch from torch.multiprocessing import start_processes from torch.multiprocessing.spawn import ProcessRaisedException if len(AcceleratorState._shared_state) > 0: raise ValueError( "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized " "inside your training function. Restart your notebook and make sure no cells initializes an " "`Accelerator`." ) # Check for specific libraries known to initialize CUDA that users constantly use problematic_imports = are_libraries_initialized("bitsandbytes") if len(problematic_imports) > 0: err = ( "Could not start distributed process. Libraries known to initialize CUDA upon import have been " "imported already. Please keep these imports inside your training function to try and help with this:" ) for lib_name in problematic_imports: err += f"\n\t* `{lib_name}`" raise RuntimeError(err) patched_env = dict( nproc=num_processes, node_rank=node_rank, world_size=num_nodes * num_processes, master_addr=master_addr, master_port=use_port, mixed_precision=mixed_precision, ) # Check for CUDA P2P and IB issues if not check_cuda_p2p_ib_support(): patched_env["nccl_p2p_disable"] = "1" patched_env["nccl_ib_disable"] = "1" # torch.distributed will expect a few environment variable to be here. We set the ones common to each # process here (the other ones will be set be the launcher). 
with patch_environment(**patched_env): # First dummy launch if os.environ.get("ACCELERATE_DEBUG_MODE", "false").lower() == "true": launcher = PrepareForLaunch(test_launch, distributed_type="MULTI_GPU") try: start_processes(launcher, args=(), nprocs=num_processes, start_method="fork") except ProcessRaisedException as e: err = "An issue was found when verifying a stable environment for the notebook launcher." if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]: raise RuntimeError( f"{err}" "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. " "Please review your imports and test them when running the `notebook_launcher()` to identify " "which one is problematic and causing CUDA to be initialized." ) from e else: raise RuntimeError(f"{err} The following error was raised: {e}") from e # Now the actual launch launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU") print(f"Launching training on {num_processes} GPUs.") try: if rdzv_conf is None: rdzv_conf = {} if rdzv_backend == "static": rdzv_conf["rank"] = node_rank if not rdzv_endpoint: rdzv_endpoint = f"{master_addr}:{use_port}" launch_config_kwargs = dict( min_nodes=num_nodes, max_nodes=num_nodes, nproc_per_node=num_processes, run_id=rdzv_id, rdzv_endpoint=rdzv_endpoint, rdzv_backend=rdzv_backend, rdzv_configs=rdzv_conf, max_restarts=max_restarts, monitor_interval=monitor_interval, start_method="fork", ) if is_torch_version(">=", ELASTIC_LOG_LINE_PREFIX_TEMPLATE_PYTORCH_VERSION): launch_config_kwargs["log_line_prefix_template"] = log_line_prefix_template elastic_launch(config=LaunchConfig(**launch_config_kwargs), entrypoint=function)(*args) except ProcessRaisedException as e: if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]: raise RuntimeError( "CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. " "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. " "Please review your imports and test them when running the `notebook_launcher()` to identify " "which one is problematic and causing CUDA to be initialized." ) from e else: raise RuntimeError(f"An issue was found when launching the training: {e}") from e else: # No need for a distributed launch otherwise as it's either CPU, GPU or MPS. if is_mps_available(): os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1" print("Launching training on MPS.") elif torch.cuda.is_available(): print("Launching training on one GPU.") else: print("Launching training on CPU.") function(*args) def debug_launcher(function, args=(), num_processes=2): """ Launches a training function using several processes on CPU for debugging purposes. <Tip warning={true}> This function is provided for internal testing and debugging, but it's not intended for real trainings. It will only use the CPU. </Tip> Args: function (`Callable`): The training function to execute. args (`Tuple`): Tuple of arguments to pass to the function (it will receive `*args`). num_processes (`int`, *optional*, defaults to 2): The number of processes to use for training. """ from torch.multiprocessing import start_processes with tempfile.NamedTemporaryFile() as tmp_file: # torch.distributed will expect a few environment variable to be here. We set the ones common to each # process here (the other ones will be set be the launcher). 
with patch_environment( world_size=num_processes, master_addr="127.0.0.1", master_port="29500", accelerate_mixed_precision="no", accelerate_debug_rdv_file=tmp_file.name, accelerate_use_cpu="yes", ): launcher = PrepareForLaunch(function, debug=True) start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
accelerate/src/accelerate/launchers.py/0
{ "file_path": "accelerate/src/accelerate/launchers.py", "repo_id": "accelerate", "token_count": 6039 }
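For a quick smoke test of the launchers above without any GPUs, `debug_launcher` is the lighter entry point: it forks `num_processes` CPU workers and forces `accelerate_use_cpu`. A minimal sketch, assuming `debug_launcher` and `Accelerator` are importable from the top-level `accelerate` package:

```python
from accelerate import Accelerator, debug_launcher


def tiny_train():
    accelerator = Accelerator(cpu=True)
    # Plain print so every forked worker reports, not just the main process.
    print(f"hello from process {accelerator.process_index} of {accelerator.num_processes}")


if __name__ == "__main__":
    debug_launcher(tiny_train, num_processes=2)
```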
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from transformers import ( BertConfig, BertForMaskedLM, GPT2Config, GPT2ForSequenceClassification, ) from accelerate import PartialState from accelerate.inference import prepare_pippy from accelerate.utils import DistributedType, set_seed model_to_config = { "bert": (BertForMaskedLM, BertConfig, 512), "gpt2": (GPT2ForSequenceClassification, GPT2Config, 1024), } def get_model_and_data_for_text(model_name, device, num_processes: int = 2): initializer, config, seq_len = model_to_config[model_name] config_args = {} # Eventually needed for batch inference tests on gpt-2 when bs != 1 # if model_name == "gpt2": # config_args["pad_token_id"] = 0 model_config = config(**config_args) model = initializer(model_config) kwargs = dict(low=0, high=model_config.vocab_size, device=device, dtype=torch.int64, requires_grad=False) trace_input = torch.randint(size=(1, seq_len), **kwargs) inference_inputs = torch.randint(size=(num_processes, seq_len), **kwargs) return model, trace_input, inference_inputs def test_bert(batch_size: int = 2): set_seed(42) state = PartialState() model, trace_input, inference_inputs = get_model_and_data_for_text("bert", "cpu", batch_size) model = prepare_pippy(model, example_args=(trace_input,), no_split_module_classes=model._no_split_modules) # For inference args need to be a tuple inputs = inference_inputs.to("cuda") with torch.no_grad(): output = model(inputs) # Zach: Check that we just grab the real outputs we need at the end if not state.is_last_process: assert output is None, "Output was not generated on just the last process!" else: assert output is not None, "Output was not generated in the last process!" def test_gpt2(batch_size: int = 2): set_seed(42) state = PartialState() model, trace_input, inference_inputs = get_model_and_data_for_text("gpt2", "cpu", batch_size) model = prepare_pippy(model, example_args=(trace_input,), no_split_module_classes=model._no_split_modules) # For inference args need to be a tuple inputs = inference_inputs.to("cuda") with torch.no_grad(): output = model(inputs) # Zach: Check that we just grab the real outputs we need at the end if not state.is_last_process: assert output is None, "Output was not generated on just the last process!" else: assert output is not None, "Output was not generated in the last process!" 
# Currently disabled, enable again once PyTorch pippy interface can trace a resnet34 # def test_resnet(batch_size: int = 2): # set_seed(42) # state = PartialState() # model = resnet34() # input_tensor = torch.rand(1, 3, 224, 224) # model = prepare_pippy( # model, # example_args=(input_tensor,), # ) # inference_inputs = torch.rand(batch_size, 3, 224, 224) # inputs = send_to_device(inference_inputs, "cuda:0") # with torch.no_grad(): # output = model(inputs) # # Zach: Check that we just grab the real outputs we need at the end # if not state.is_last_process: # assert output is None, "Output was not generated on just the last process!" # else: # assert output is not None, "Output was not generated in the last process!" if __name__ == "__main__": state = PartialState() state.print("Testing pippy integration...") try: if state.distributed_type == DistributedType.MULTI_GPU: state.print("Testing GPT2...") test_gpt2() # Issue: When modifying the tokenizer for batch GPT2 inference, there's an issue # due to references # NameError: cannot access free variable 'chunk_args_list' where it is not associated with a value in enclosing scope # test_gpt2(3) state.print("Testing BERT...") test_bert() else: print("Less than two GPUs found, not running tests!") finally: state.destroy_process_group()
accelerate/src/accelerate/test_utils/scripts/external_deps/test_pippy.py/0
{ "file_path": "accelerate/src/accelerate/test_utils/scripts/external_deps/test_pippy.py", "repo_id": "accelerate", "token_count": 1697 }
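The BERT path of the test above can be reduced to the sketch below. It mirrors the calls in the file (same `prepare_pippy` keyword arguments), assumes at least two GPUs, and is meant to be started with `accelerate launch`; only the last pipeline stage receives a non-`None` output.

```python
import torch
from transformers import BertConfig, BertForMaskedLM

from accelerate import PartialState
from accelerate.inference import prepare_pippy

state = PartialState()
config = BertConfig()
model = BertForMaskedLM(config)

trace_input = torch.randint(0, config.vocab_size, (1, 512))       # only used to trace the split
inference_inputs = torch.randint(0, config.vocab_size, (2, 512))  # one micro-batch per process

model = prepare_pippy(model, example_args=(trace_input,), no_split_module_classes=model._no_split_modules)

with torch.no_grad():
    output = model(inference_inputs.to("cuda"))

# Only the last pipeline stage gets the real output; every other rank sees None.
if state.is_last_process:
    print(type(output))
```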
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ General namespace and dataclass related classes """ import argparse import copy import enum import functools import os import warnings from contextlib import contextmanager from dataclasses import dataclass, field from datetime import timedelta from typing import Any, Callable, Dict, Iterable, List, Literal, Optional, Tuple, Union, get_args import torch from .constants import ( BETA_TP_AVAILABLE_PYTORCH_VERSION, FSDP_AUTO_WRAP_POLICY, FSDP_BACKWARD_PREFETCH, FSDP_SHARDING_STRATEGY, MITA_PROFILING_AVAILABLE_PYTORCH_VERSION, XPU_PROFILING_AVAILABLE_PYTORCH_VERSION, ) from .environment import parse_flag_from_env, str_to_bool from .imports import ( is_cuda_available, is_mlu_available, is_msamp_available, is_musa_available, is_npu_available, is_transformer_engine_available, is_xpu_available, ) from .versions import compare_versions, is_torch_version class KwargsHandler: """ Internal mixin that implements a `to_kwargs()` method for a dataclass. """ def to_dict(self): return copy.deepcopy(self.__dict__) def to_kwargs(self): """ Returns a dictionary containing the attributes with values different from the default of this class. """ # import clear_environment here to avoid circular import problem from .environment import clear_environment with clear_environment(): default_dict = self.__class__().to_dict() this_dict = self.to_dict() return {k: v for k, v in this_dict.items() if default_dict[k] != v} class EnumWithContains(enum.EnumMeta): "A metaclass that adds the ability to check if `self` contains an item with the `in` operator" def __contains__(cls, item): try: cls(item) except ValueError: return False return True class BaseEnum(enum.Enum, metaclass=EnumWithContains): "An enum class that can get the value of an item with `str(Enum.key)`" def __str__(self): return self.value @classmethod def list(cls): "Method to list all the possible items in `cls`" return list(map(str, cls)) @dataclass class AutocastKwargs(KwargsHandler): """ Use this object in your [`Accelerator`] to customize how `torch.autocast` behaves. Please refer to the documentation of this [context manager](https://pytorch.org/docs/stable/amp.html#torch.autocast) for more information on each argument. Example: ```python from accelerate import Accelerator from accelerate.utils import AutocastKwargs kwargs = AutocastKwargs(cache_enabled=True) accelerator = Accelerator(kwargs_handlers=[kwargs]) ``` """ enabled: bool = True cache_enabled: bool = None class DDPCommunicationHookType(BaseEnum): """ Represents a type of communication hook used in DDP. 
Values: - **NO** -- no communication hook - **FP16** -- DDP communication hook to compress the gradients in FP16 - **BF16** -- DDP communication hook to compress the gradients in BF16 - **POWER_SGD** -- DDP communication hook to use PowerSGD - **BATCHED_POWER_SGD** -- DDP communication hook to use batched PowerSGD """ NO = "no" FP16 = "fp16" BF16 = "bf16" POWER_SGD = "power_sgd" BATCHED_POWER_SGD = "batched_power_sgd" @dataclass class DistributedDataParallelKwargs(KwargsHandler): """ Use this object in your [`Accelerator`] to customize how your model is wrapped in a `torch.nn.parallel.DistributedDataParallel`. Please refer to the documentation of this [wrapper](https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html) for more information on each argument. <Tip warning={true}> `gradient_as_bucket_view` is only available in PyTorch 1.7.0 and later versions. `static_graph` is only available in PyTorch 1.11.0 and later versions. </Tip> Example: ```python from accelerate import Accelerator from accelerate.utils import DistributedDataParallelKwargs kwargs = DistributedDataParallelKwargs(find_unused_parameters=True) accelerator = Accelerator(kwargs_handlers=[kwargs]) ``` """ dim: int = 0 broadcast_buffers: bool = True bucket_cap_mb: int = 25 find_unused_parameters: bool = False check_reduction: bool = False gradient_as_bucket_view: bool = False static_graph: bool = False comm_hook: DDPCommunicationHookType = DDPCommunicationHookType.NO comm_wrapper: Literal[ DDPCommunicationHookType.NO, DDPCommunicationHookType.FP16, DDPCommunicationHookType.BF16 ] = DDPCommunicationHookType.NO comm_state_option: dict = field(default_factory=dict) def to_dict(self, ignore_keys=("comm_hook", "comm_wrapper", "comm_state_option")): return {k: v for k, v in super().to_dict().items() if k not in ignore_keys} def register_comm_hook(self, model): from torch.distributed.algorithms.ddp_comm_hooks import default_hooks, powerSGD_hook hook_map: Dict[DDPCommunicationHookType, Callable] = { DDPCommunicationHookType.FP16: default_hooks.fp16_compress_hook, DDPCommunicationHookType.BF16: default_hooks.bf16_compress_hook, DDPCommunicationHookType.POWER_SGD: powerSGD_hook.powerSGD_hook, DDPCommunicationHookType.BATCHED_POWER_SGD: powerSGD_hook.batched_powerSGD_hook, } wrapper_map: Dict[DDPCommunicationHookType, Callable] = { DDPCommunicationHookType.FP16: default_hooks.fp16_compress_wrapper, DDPCommunicationHookType.BF16: default_hooks.bf16_compress_wrapper, } hook: Optional[Callable] = hook_map.get(self.comm_hook) wrapper: Optional[Callable] = wrapper_map.get(self.comm_wrapper) if hook and wrapper: hook = wrapper(hook) if hook: state = ( powerSGD_hook.PowerSGDState(None, **self.comm_state_option) if self.comm_hook in (DDPCommunicationHookType.POWER_SGD, DDPCommunicationHookType.BATCHED_POWER_SGD) else None ) model.register_comm_hook( state=state, hook=hook, ) @dataclass class GradScalerKwargs(KwargsHandler): """ Use this object in your [`Accelerator`] to customize the behavior of mixed precision, specifically how the `torch.cuda.amp.GradScaler` used is created. Please refer to the documentation of this [scaler](https://pytorch.org/docs/stable/amp.html?highlight=gradscaler) for more information on each argument. <Tip warning={true}> `GradScaler` is only available in PyTorch 1.5.0 and later versions. 
</Tip> Example: ```python from accelerate import Accelerator from accelerate.utils import GradScalerKwargs kwargs = GradScalerKwargs(backoff_factor=0.25) accelerator = Accelerator(kwargs_handlers=[kwargs]) ``` """ init_scale: float = 65536.0 growth_factor: float = 2.0 backoff_factor: float = 0.5 growth_interval: int = 2000 enabled: bool = True @dataclass class InitProcessGroupKwargs(KwargsHandler): """ Use this object in your [`Accelerator`] to customize the initialization of the distributed processes. Please refer to the documentation of this [method](https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group) for more information on each argument. Note: If `timeout` is set to `None`, the default will be based upon how `backend` is set. ```python from datetime import timedelta from accelerate import Accelerator from accelerate.utils import InitProcessGroupKwargs kwargs = InitProcessGroupKwargs(timeout=timedelta(seconds=800)) accelerator = Accelerator(kwargs_handlers=[kwargs]) ``` """ backend: Optional[str] = "nccl" init_method: Optional[str] = None timeout: Optional[timedelta] = None def __post_init__(self): if self.timeout is None: seconds = 1800 if self.backend != "nccl" else 600 self.timeout = timedelta(seconds=seconds) # Literals Backend = Literal["MSAMP", "TE"] OptLevel = Literal["O1", "O2"] FP8Format = Literal["E4M3", "HYBRID"] AmaxComputeAlgorithm = Literal["max", "most_recent"] @dataclass class FP8RecipeKwargs(KwargsHandler): """ Use this object in your [`Accelerator`] to customize the initialization of the recipe for FP8 mixed precision training with `transformer-engine` or `ms-amp`. <Tip> For more information on `transformer-engine` args, please refer to the API [documentation](https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/api/common.html). For more information on the `ms-amp` args, please refer to the Optimization Level [documentation](https://azure.github.io/MS-AMP/docs/user-tutorial/optimization-level). </Tip> ```python from accelerate import Accelerator from accelerate.utils import FP8RecipeKwargs kwargs = FP8RecipeKwargs(backend="te", fp8_format="HYBRID") accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=[kwargs]) ``` To use MS-AMP as an engine, pass `backend="msamp"` and the `optimization_level`: ```python kwargs = FP8RecipeKwargs(backend="msamp", optimization_level="02") ``` Args: backend (`str`, *optional*): Which FP8 engine to use. Must be one of `"msamp"` (MS-AMP) or `"te"` (TransformerEngine). If not passed, will use whichever is available in the environment, prioritizing MS-AMP. use_autocast_during_eval (`bool`, *optional*, default to `False`): Whether to use FP8 autocast during eval mode. Generally better metrics are found when this is `False`. margin (`int`, *optional*, default to 0): The margin to use for the gradient scaling. interval (`int`, *optional*, default to 1): The interval to use for how often the scaling factor is recomputed. fp8_format (`str`, *optional*, default to "HYBRID"): The format to use for the FP8 recipe. Must be one of `HYBRID` or `E4M3`. (Generally `HYBRID` for training, `E4M3` for evaluation) amax_history_len (`int`, *optional*, default to 1024): The length of the history to use for the scaling factor computation amax_compute_algo (`str`, *optional*, default to "most_recent"): The algorithm to use for the scaling factor computation. Must be one of `max` or `most_recent`. 
override_linear_precision (`tuple` of three `bool`, *optional*, default to `(False, False, False)`): Whether or not to execute `fprop`, `dgrad`, and `wgrad` GEMMS in higher precision. optimization_level (`str`), one of `O1`, `O2`. (default is `O2`): What level of 8-bit collective communication should be used with MS-AMP. In general: * O1: Weight gradients and `all_reduce` communications are done in fp8, reducing GPU memory usage and communication bandwidth * O2: First-order optimizer states are in 8-bit, and second order states are in FP16. Only available when using Adam or AdamW. This maintains accuracy and can potentially save the highest memory. * 03: Specifically for DeepSpeed, implements capabilities so weights and master weights of models are stored in FP8. If `fp8` is selected and deepspeed is enabled, will be used by default. (Not available currently). """ backend: Backend = None use_autocast_during_eval: bool = None opt_level: OptLevel = None margin: int = None interval: int = None fp8_format: FP8Format = None amax_history_len: int = None amax_compute_algo: AmaxComputeAlgorithm = None override_linear_precision: Tuple[bool, bool, bool] = None def __post_init__(self): env_prefix = "ACCELERATE_FP8_" default_backend = "msamp" if is_msamp_available() else "te" if self.backend is None: self.backend = os.environ.get(env_prefix + "BACKEND", default_backend) self.backend = self.backend.upper() if self.backend not in get_args(Backend): raise ValueError("`backend` must be 'MSAMP' or 'TE' (TransformerEngine).") # Check TE args if self.backend == "TE": if not is_transformer_engine_available(): raise ValueError( "TransformerEngine is not available. Please either install it, or use the 'MSAMP' backend (if installed)." ) if self.use_autocast_during_eval is None: self.use_autocast_during_eval = parse_flag_from_env(env_prefix + "USE_AUTOCAST_DURING_EVAL") if self.margin is None: self.margin = int(os.environ.get(env_prefix + "MARGIN", 0)) if self.interval is None: self.interval = int(os.environ.get(env_prefix + "INTERVAL", 1)) if self.fp8_format is None: self.fp8_format = os.environ.get(env_prefix + "FORMAT", "HYBRID") self.fp8_format = self.fp8_format.upper() if self.fp8_format not in get_args(FP8Format): raise ValueError(f"`fp8_format` must be one of {' or '.join(get_args(FP8Format))}.") if self.amax_compute_algo is None: self.amax_compute_algo = os.environ.get(env_prefix + "AMAX_COMPUTE_ALGO", "most_recent") self.amax_compute_algo = self.amax_compute_algo.lower() if self.amax_compute_algo not in get_args(AmaxComputeAlgorithm): raise ValueError(f"`amax_compute_algo` must be one of {' or '.join(get_args(AmaxComputeAlgorithm))}") if self.amax_history_len is None: self.amax_history_len = int(os.environ.get(env_prefix + "AMAX_HISTORY_LEN", 1024)) if self.override_linear_precision is None: fprop = parse_flag_from_env(env_prefix + "OVERRIDE_FPROP") dgrad = parse_flag_from_env(env_prefix + "OVERRIDE_DGRAD") wgrad = parse_flag_from_env(env_prefix + "OVERRIDE_WGRAD") self.override_linear_precision = (fprop, dgrad, wgrad) elif self.backend == "MSAMP": if not is_msamp_available(): raise ValueError( "MS-AMP is not available. Please either install it, or use the 'TE' backend (if installed)." 
) if self.opt_level is None: self.opt_level = os.environ.get(env_prefix + "OPT_LEVEL", "O2") if self.opt_level not in get_args(OptLevel): raise ValueError(f"`optimization_level` must be one of {' or '.join(get_args(OptLevel))}") # Literal ProfilerActivity = Literal["cpu", "xpu", "mtia", "cuda"] @dataclass class ProfileKwargs(KwargsHandler): """ Use this object in your [`Accelerator`] to customize the initialization of the profiler. Please refer to the documentation of this [context manager](https://pytorch.org/docs/stable/profiler.html#torch.profiler.profile) for more information on each argument. <Tip warning={true}> `torch.profiler` is only available in PyTorch 1.8.1 and later versions. </Tip> Example: ```python from accelerate import Accelerator from accelerate.utils import ProfileKwargs kwargs = ProfileKwargs(activities=["cpu", "cuda"]) accelerator = Accelerator(kwargs_handlers=[kwargs]) ``` Args: activities (`List[str]`, *optional*, default to `None`): The list of activity groups to use in profiling. Must be one of `"cpu"`, `"xpu"`, `"mtia"`, or `"cuda"`. schedule_option (`Dict[str, int]`, *optional*, default to `None`): The schedule option to use for the profiler. Available keys are `wait`, `warmup`, `active`, `repeat` and `skip_first`. The profiler will skip the first `skip_first` steps, then wait for `wait` steps, then do the warmup for the next `warmup` steps, then do the active recording for the next `active` steps and then repeat the cycle starting with `wait` steps. The optional number of cycles is specified with the `repeat` parameter, the zero value means that the cycles will continue until the profiling is finished. on_trace_ready (`Callable`, *optional*, default to `None`): Callable that is called at each step when schedule returns `ProfilerAction.RECORD_AND_SAVE` during the profiling. record_shapes (`bool`, *optional*, default to `False`): Save information about operator’s input shapes. profile_memory (`bool`, *optional*, default to `False`): Track tensor memory allocation/deallocation with_stack (`bool`, *optional*, default to `False`): Record source information (file and line number) for the ops. with_flops (`bool`, *optional*, default to `False`): Use formula to estimate the FLOPS of specific operators with_modules (`bool`, *optional*, default to `False`): Record module hierarchy (including function names) corresponding to the callstack of the op. output_trace_dir (`str`, *optional*, default to `None`): Exports the collected trace in Chrome JSON format. Chrome use 'chrome://tracing' view json file. Defaults to None, which means profiling does not store json files. """ activities: Optional[List[ProfilerActivity]] = None schedule_option: Optional[Dict[str, int]] = None on_trace_ready: Optional[Callable] = None record_shapes: bool = False profile_memory: bool = False with_stack: bool = False with_flops: bool = False with_modules: bool = False output_trace_dir: Optional[str] = None def _get_profiler_activity(self, activity: ProfilerActivity) -> torch.profiler.ProfilerActivity: """Get the profiler activity from the string. Args: activity (str): The profiler activity name. Returns: torch.profiler.ProfilerActivity: The profiler activity. 
""" profiler_activity_map: dict[str, torch.profiler.ProfilerActivity] = { "cpu": torch.profiler.ProfilerActivity.CPU, "cuda": torch.profiler.ProfilerActivity.CUDA, } if is_torch_version(">=", XPU_PROFILING_AVAILABLE_PYTORCH_VERSION): profiler_activity_map["xpu"] = torch.profiler.ProfilerActivity.XPU if is_torch_version(">=", MITA_PROFILING_AVAILABLE_PYTORCH_VERSION): profiler_activity_map["mtia"] = torch.profiler.ProfilerActivity.MTIA if activity not in profiler_activity_map: raise ValueError(f"Invalid profiler activity: {activity}. Must be one of {list(profiler_activity_map)}.") return profiler_activity_map[activity] def build(self) -> torch.profiler.profile: """ Build a profiler object with the current configuration. Returns: torch.profiler.profile: The profiler object. """ activities: Optional[List[ProfilerActivity]] = None if self.activities is not None: activities = [self._get_profiler_activity(activity) for activity in self.activities] schedule: Optional[torch.profiler.schedule] = None if self.schedule_option is not None: schedule = torch.profiler.schedule(**self.schedule_option) return torch.profiler.profile( activities=activities, schedule=schedule, on_trace_ready=self.on_trace_ready, record_shapes=self.record_shapes, profile_memory=self.profile_memory, with_stack=self.with_stack, with_flops=self.with_flops, with_modules=self.with_modules, ) class DistributedType(str, enum.Enum): """ Represents a type of distributed environment. Values: - **NO** -- Not a distributed environment, just a single process. - **MULTI_CPU** -- Distributed on multiple CPU nodes. - **MULTI_GPU** -- Distributed on multiple GPUs. - **MULTI_MLU** -- Distributed on multiple MLUs. - **MULTI_MUSA** -- Distributed on multiple MUSAs. - **MULTI_NPU** -- Distributed on multiple NPUs. - **MULTI_XPU** -- Distributed on multiple XPUs. - **DEEPSPEED** -- Using DeepSpeed. - **XLA** -- Using TorchXLA. """ # Subclassing str as well as Enum allows the `DistributedType` to be JSON-serializable out of the box. NO = "NO" MULTI_CPU = "MULTI_CPU" MULTI_GPU = "MULTI_GPU" MULTI_NPU = "MULTI_NPU" MULTI_MLU = "MULTI_MLU" MULTI_MUSA = "MULTI_MUSA" MULTI_XPU = "MULTI_XPU" DEEPSPEED = "DEEPSPEED" FSDP = "FSDP" TP = "TP" XLA = "XLA" MEGATRON_LM = "MEGATRON_LM" class SageMakerDistributedType(str, enum.Enum): """ Represents a type of distributed environment. Values: - **NO** -- Not a distributed environment, just a single process. - **DATA_PARALLEL** -- using sagemaker distributed data parallelism. - **MODEL_PARALLEL** -- using sagemaker distributed model parallelism. """ # Subclassing str as well as Enum allows the `SageMakerDistributedType` to be JSON-serializable out of the box. NO = "NO" DATA_PARALLEL = "DATA_PARALLEL" MODEL_PARALLEL = "MODEL_PARALLEL" class FP8BackendType(str, enum.Enum): """ Represents the backend used for FP8. Values: - **TE** -- using TransformerEngine. - **MSAMP** -- using msamp. """ # Subclassing str as well as Enum allows the `FP8BackendType` to be JSON-serializable out of the box. TE = "TE" MSAMP = "MSAMP" class ComputeEnvironment(str, enum.Enum): """ Represents a type of the compute environment. Values: - **LOCAL_MACHINE** -- private/custom cluster hardware. - **AMAZON_SAGEMAKER** -- Amazon SageMaker as compute environment. """ # Subclassing str as well as Enum allows the `ComputeEnvironment` to be JSON-serializable out of the box. 
LOCAL_MACHINE = "LOCAL_MACHINE" AMAZON_SAGEMAKER = "AMAZON_SAGEMAKER" class DynamoBackend(str, BaseEnum): """ Represents a dynamo backend (see https://pytorch.org/docs/stable/torch.compiler.html). Values: - **NO** -- Do not use torch dynamo. - **EAGER** -- Uses PyTorch to run the extracted GraphModule. This is quite useful in debugging TorchDynamo issues. - **AOT_EAGER** -- Uses AotAutograd with no compiler, i.e, just using PyTorch eager for the AotAutograd's extracted forward and backward graphs. This is useful for debugging, and unlikely to give speedups. - **INDUCTOR** -- Uses TorchInductor backend with AotAutograd and cudagraphs by leveraging codegened Triton kernels. [Read more](https://dev-discuss.pytorch.org/t/torchinductor-a-pytorch-native-compiler-with-define-by-run-ir-and-symbolic-shapes/747) - **AOT_TS_NVFUSER** -- nvFuser with AotAutograd/TorchScript. [Read more](https://dev-discuss.pytorch.org/t/tracing-with-primitives-update-1-nvfuser-and-its-primitives/593) - **NVPRIMS_NVFUSER** -- nvFuser with PrimTorch. [Read more](https://dev-discuss.pytorch.org/t/tracing-with-primitives-update-1-nvfuser-and-its-primitives/593) - **CUDAGRAPHS** -- cudagraphs with AotAutograd. [Read more](https://github.com/pytorch/torchdynamo/pull/757) - **OFI** -- Uses Torchscript optimize_for_inference. Inference only. [Read more](https://pytorch.org/docs/stable/generated/torch.jit.optimize_for_inference.html) - **FX2TRT** -- Uses Nvidia TensorRT for inference optimizations. Inference only. [Read more](https://github.com/pytorch/TensorRT/blob/master/docsrc/tutorials/getting_started_with_fx_path.rst) - **ONNXRT** -- Uses ONNXRT for inference on CPU/GPU. Inference only. [Read more](https://onnxruntime.ai/) - **TENSORRT** -- Uses ONNXRT to run TensorRT for inference optimizations. [Read more](https://github.com/onnx/onnx-tensorrt) - **AOT_TORCHXLA_TRACE_ONCE** -- Uses Pytorch/XLA with TorchDynamo optimization, for training. [Read more](https://github.com/pytorch/xla/blob/r2.0/docs/dynamo.md) - **TORCHXLA_TRACE_ONCE** -- Uses Pytorch/XLA with TorchDynamo optimization, for inference. [Read more](https://github.com/pytorch/xla/blob/r2.0/docs/dynamo.md) - **IPEX** -- Uses IPEX for inference on CPU. Inference only. [Read more](https://github.com/intel/intel-extension-for-pytorch). - **TVM** -- Uses Apach TVM for inference optimizations. [Read more](https://tvm.apache.org/) """ # Subclassing str as well as Enum allows the `SageMakerDistributedType` to be JSON-serializable out of the box. 
NO = "NO" EAGER = "EAGER" AOT_EAGER = "AOT_EAGER" INDUCTOR = "INDUCTOR" AOT_TS_NVFUSER = "AOT_TS_NVFUSER" NVPRIMS_NVFUSER = "NVPRIMS_NVFUSER" CUDAGRAPHS = "CUDAGRAPHS" OFI = "OFI" FX2TRT = "FX2TRT" ONNXRT = "ONNXRT" TENSORRT = "TENSORRT" AOT_TORCHXLA_TRACE_ONCE = "AOT_TORCHXLA_TRACE_ONCE" TORCHXLA_TRACE_ONCE = "TORCHXLA_TRACE_ONCE" IPEX = "IPEX" TVM = "TVM" class LoggerType(BaseEnum): """Represents a type of supported experiment tracker Values: - **ALL** -- all available trackers in the environment that are supported - **TENSORBOARD** -- TensorBoard as an experiment tracker - **WANDB** -- wandb as an experiment tracker - **COMETML** -- comet_ml as an experiment tracker - **DVCLIVE** -- dvclive as an experiment tracker """ ALL = "all" AIM = "aim" TENSORBOARD = "tensorboard" WANDB = "wandb" COMETML = "comet_ml" MLFLOW = "mlflow" CLEARML = "clearml" DVCLIVE = "dvclive" class PrecisionType(str, BaseEnum): """Represents a type of precision used on floating point values Values: - **NO** -- using full precision (FP32) - **FP16** -- using half precision - **BF16** -- using brain floating point precision """ NO = "no" FP8 = "fp8" FP16 = "fp16" BF16 = "bf16" class RNGType(BaseEnum): TORCH = "torch" CUDA = "cuda" MLU = "mlu" MUSA = "musa" NPU = "npu" XLA = "xla" XPU = "xpu" GENERATOR = "generator" class CustomDtype(enum.Enum): r""" An enum that contains multiple custom dtypes that can be used for `infer_auto_device_map`. """ FP8 = "fp8" INT4 = "int4" INT2 = "int2" # data classes @dataclass class TensorInformation: shape: torch.Size dtype: torch.dtype @dataclass class DataLoaderConfiguration: """ Configuration for dataloader-related items when calling `accelerator.prepare`. Args: split_batches (`bool`, defaults to `False`): Whether or not the accelerator should split the batches yielded by the dataloaders across the devices. If `True`, the actual batch size used will be the same on any kind of distributed processes, but it must be a round multiple of `num_processes` you are using. If `False`, actual batch size used will be the one set in your script multiplied by the number of processes. dispatch_batches (`bool`, defaults to `None`): If set to `True`, the dataloader prepared by the Accelerator is only iterated through on the main process and then the batches are split and broadcast to each process. Will default to `True` for `DataLoader` whose underlying dataset is an `IterableDataset`, `False` otherwise. even_batches (`bool`, defaults to `True`): If set to `True`, in cases where the total batch size across all processes does not exactly divide the dataset, samples at the start of the dataset will be duplicated so the batch can be divided equally among all workers. use_seedable_sampler (`bool`, defaults to `False`): Whether or not use a fully seedable random sampler ([`data_loader.SeedableRandomSampler`]). Ensures training results are fully reproducable using a different sampling technique. While seed-to-seed results may differ, on average the differences are neglible when using multiple different seeds to compare. Should also be ran with [`~utils.set_seed`] for the best results. data_seed (`int`, defaults to `None`): The seed to use for the underlying generator when using `use_seedable_sampler`. If `None`, the generator will use the current default seed from torch. non_blocking (`bool`, defaults to `False`): If set to `True`, the dataloader prepared by the Accelerator will utilize non-blocking host-to-device transfers, allowing for better overlap between dataloader communication and computation. 
Recommended that the prepared dataloader has `pin_memory` set to `True` to work properly. use_stateful_dataloader (`bool`, defaults to `False`): If set to `True`, the dataloader prepared by the Accelerator will be backed by [torchdata.StatefulDataLoader](https://github.com/pytorch/data/tree/main/torchdata/stateful_dataloader). This requires `torchdata` version 0.8.0 or higher that supports StatefulDataLoader to be installed. """ split_batches: bool = field( default=False, metadata={ "help": "Whether or not the accelerator should split the batches yielded by the dataloaders across the devices. If" " `True` the actual batch size used will be the same on any kind of distributed processes, but it must be a" " round multiple of the `num_processes` you are using. If `False`, actual batch size used will be the one set" " in your script multiplied by the number of processes." }, ) dispatch_batches: bool = field( default=None, metadata={ "help": "If set to `True`, the dataloader prepared by the Accelerator is only iterated through on the main process" " and then the batches are split and broadcast to each process. Will default to `True` for `DataLoader` whose" " underlying dataset is an `IterableDataset`, `False` otherwise." }, ) even_batches: bool = field( default=True, metadata={ "help": "If set to `True`, in cases where the total batch size across all processes does not exactly divide the" " dataset, samples at the start of the dataset will be duplicated so the batch can be divided equally among" " all workers." }, ) use_seedable_sampler: bool = field( default=False, metadata={ "help": "Whether or not use a fully seedable random sampler ([`data_loader.SeedableRandomSampler`])." "Ensures training results are fully reproducable using a different sampling technique. " "While seed-to-seed results may differ, on average the differences are neglible when using" "multiple different seeds to compare. Should also be ran with [`~utils.set_seed`] for the best results." }, ) data_seed: int = field( default=None, metadata={ "help": "The seed to use for the underlying generator when using `use_seedable_sampler`. If `None`, the generator" " will use the current default seed from torch." }, ) non_blocking: bool = field( default=False, metadata={ "help": "If set to `True`, the dataloader prepared by the Accelerator will utilize non-blocking host-to-device" " transfers, allowing for better overlap between dataloader communication and computation. Recommended that the" " prepared dataloader has `pin_memory` set to `True` to work properly." }, ) use_stateful_dataloader: bool = field( default=False, metadata={ "help": "If set to `True`, the dataloader prepared by the Accelerator will be backed by " "[torchdata.StatefulDataLoader](https://github.com/pytorch/data/tree/main/torchdata/stateful_dataloader). This requires `torchdata` version 0.8.0 or higher that supports StatefulDataLoader to be installed." }, ) @dataclass class ProjectConfiguration: """ Configuration for the Accelerator object based on inner-project needs. Args: project_dir (`str`, defaults to `None`): A path to a directory for storing data. logging_dir (`str`, defaults to `None`): A path to a directory for storing logs of locally-compatible loggers. If None, defaults to `project_dir`. automatic_checkpoint_naming (`bool`, defaults to `False`): Whether saved states should be automatically iteratively named. total_limit (`int`, defaults to `None`): The maximum number of total saved states to keep. 
iteration (`int`, defaults to `0`): The current save iteration. save_on_each_node (`bool`, defaults to `False`): When doing multi-node distributed training, whether to save models and checkpoints on each node, or only on the main one. """ project_dir: str = field(default=None, metadata={"help": "A path to a directory for storing data."}) logging_dir: str = field( default=None, metadata={ "help": "A path to a directory for storing logs of locally-compatible loggers. If None, defaults to `project_dir`." }, ) automatic_checkpoint_naming: bool = field( default=False, metadata={"help": "Whether saved states should be automatically iteratively named."}, ) total_limit: int = field( default=None, metadata={"help": "The maximum number of total saved states to keep."}, ) iteration: int = field( default=0, metadata={"help": "The current save iteration."}, ) save_on_each_node: bool = field( default=False, metadata={ "help": ( "When doing multi-node distributed training, whether to save models and checkpoints on each node, or" " only on the main one" ) }, ) def set_directories(self, project_dir: str = None): "Sets `self.project_dir` and `self.logging_dir` to the appropriate values." self.project_dir = project_dir if self.logging_dir is None: self.logging_dir = project_dir def __post_init__(self): self.set_directories(self.project_dir) @dataclass class GradientAccumulationPlugin(KwargsHandler): """ A plugin to configure gradient accumulation behavior. You can only pass one of `gradient_accumulation_plugin` or `gradient_accumulation_steps` to [`Accelerator`]. Passing both raises an error. Parameters: num_steps (`int`): The number of steps to accumulate gradients for. adjust_scheduler (`bool`, *optional*, defaults to `True`): Whether to adjust the scheduler steps to account for the number of steps being accumulated. Should be `True` if the used scheduler was not adjusted for gradient accumulation. sync_with_dataloader (`bool`, *optional*, defaults to `True`): Whether to synchronize setting the gradients when at the end of the dataloader. sync_each_batch (`bool`, *optional*): Whether to synchronize setting the gradients at each data batch. Seting to `True` may reduce memory requirements when using gradient accumulation with distributed training, at expense of speed. Example: ```python from accelerate.utils import GradientAccumulationPlugin gradient_accumulation_plugin = GradientAccumulationPlugin(num_steps=2) accelerator = Accelerator(gradient_accumulation_plugin=gradient_accumulation_plugin) ``` """ num_steps: int = field(default=None, metadata={"help": "The number of steps to accumulate gradients for."}) adjust_scheduler: bool = field( default=True, metadata={ "help": "Whether to adjust the scheduler steps to account for the number of steps being accumulated. Should be `True` if the used scheduler was not adjusted for gradient accumulation." }, ) sync_with_dataloader: bool = field( default=True, metadata={ "help": "Whether to synchronize setting the gradients when at the end of the dataloader. Should only be set to `False` if you know what you're doing." }, ) sync_each_batch: bool = field( default=False, metadata={ "help": "Whether to synchronize setting the gradients at each data batch. Setting to `True` may reduce memory requirements when using gradient accumulation with distributed training, at expense of speed." 
}, ) @dataclass class TorchDynamoPlugin(KwargsHandler): """ This plugin is used to compile a model with PyTorch 2.0 Args: backend (`DynamoBackend`, defaults to `None`): A valid Dynamo backend. See https://pytorch.org/docs/stable/torch.compiler.html for more details. mode (`str`, defaults to `None`): Possible options are 'default', 'reduce-overhead' or 'max-autotune'. fullgraph (`bool`, defaults to `None`): Whether it is ok to break model into several subgraphs. dynamic (`bool`, defaults to `None`): Whether to use dynamic shape for tracing. options (`Any`, defaults to `None`): A dictionary of options to pass to the backend. disable (`bool`, defaults to `False`): Turn torch.compile() into a no-op for testing """ backend: DynamoBackend = field( default=None, metadata={"help": f"Possible options are {[b.value.lower() for b in DynamoBackend]}"}, ) mode: str = field( default=None, metadata={"help": "Possible options are 'default', 'reduce-overhead' or 'max-autotune'"} ) fullgraph: bool = field(default=None, metadata={"help": "Whether it is ok to break model into several subgraphs"}) dynamic: bool = field(default=None, metadata={"help": "Whether to use dynamic shape for tracing"}) options: Any = field(default=None, metadata={"help": "A dictionary of options to pass to the backend."}) disable: bool = field(default=False, metadata={"help": "Turn torch.compile() into a no-op for testing"}) def __post_init__(self): prefix = "ACCELERATE_DYNAMO_" if self.backend is None: self.backend = os.environ.get(prefix + "BACKEND", "no") self.backend = DynamoBackend(self.backend.upper()) if self.mode is None: self.mode = os.environ.get(prefix + "MODE", "default") if self.fullgraph is None: self.fullgraph = str_to_bool(os.environ.get(prefix + "USE_FULLGRAPH", "False")) == 1 if self.dynamic is None: self.dynamic = str_to_bool(os.environ.get(prefix + "USE_DYNAMIC", "False")) == 1 def to_dict(self): dynamo_config = copy.deepcopy(self.__dict__) dynamo_config["backend"] = dynamo_config["backend"].value.lower() return dynamo_config @dataclass class DeepSpeedPlugin: """ This plugin is used to integrate DeepSpeed. Args: hf_ds_config (`Any`, defaults to `None`): Path to DeepSpeed config file or dict or an object of class `accelerate.utils.deepspeed.HfDeepSpeedConfig`. gradient_accumulation_steps (`int`, defaults to `None`): Number of steps to accumulate gradients before updating optimizer states. If not set, will use the value from the `Accelerator` directly. gradient_clipping (`float`, defaults to `None`): Enable gradient clipping with value. zero_stage (`int`, defaults to `None`): Possible options are 0, 1, 2, 3. Default will be taken from environment variable. is_train_batch_min (`bool`, defaults to `True`): If both train & eval dataloaders are specified, this will decide the `train_batch_size`. offload_optimizer_device (`str`, defaults to `None`): Possible options are none|cpu|nvme. Only applicable with ZeRO Stages 2 and 3. offload_param_device (`str`, defaults to `None`): Possible options are none|cpu|nvme. Only applicable with ZeRO Stage 3. offload_optimizer_nvme_path (`str`, defaults to `None`): Possible options are /nvme|/local_nvme. Only applicable with ZeRO Stage 3. offload_param_nvme_path (`str`, defaults to `None`): Possible options are /nvme|/local_nvme. Only applicable with ZeRO Stage 3. zero3_init_flag (`bool`, defaults to `None`): Flag to indicate whether to save 16-bit model. Only applicable with ZeRO Stage-3. 
        zero3_save_16bit_model (`bool`, defaults to `None`):
            Flag to indicate whether to save 16-bit model. Only applicable with ZeRO Stage-3.
        transformer_moe_cls_names (`str`, defaults to `None`):
            Comma-separated list of Transformers MoE layer class names (case-sensitive). For example,
            `MixtralSparseMoeBlock`, `Qwen2MoeSparseMoeBlock`, `JetMoEAttention`, `JetMoEBlock`, etc.
        enable_msamp (`bool`, defaults to `None`):
            Flag to indicate whether to enable MS-AMP backend for FP8 training.
        msamp_opt_level (`Optional[Literal["O1", "O2"]]`, defaults to `None`):
            Optimization level for MS-AMP (defaults to 'O1'). Only applicable if `enable_msamp` is True. Should be
            one of ['O1', 'O2'].
    """

    hf_ds_config: Any = field(
        default=None,
        metadata={
            "help": "path to DeepSpeed config file or dict or an object of class `accelerate.utils.deepspeed.HfDeepSpeedConfig`."
        },
    )
    gradient_accumulation_steps: int = field(
        default=None,
        metadata={
            "help": "Number of steps to accumulate gradients before updating optimizer states. If not set, will use the value from the `Accelerator` directly."
        },
    )
    gradient_clipping: float = field(default=None, metadata={"help": "Enable gradient clipping with value"})
    zero_stage: int = field(
        default=None,
        metadata={"help": "Possible options are 0,1,2,3; Default will be taken from environment variable"},
    )
    is_train_batch_min: bool = field(
        default=True,
        metadata={"help": "If both train & eval dataloaders are specified, this will decide the train_batch_size"},
    )
    offload_optimizer_device: str = field(
        default=None,
        metadata={"help": "Possible options are none|cpu|nvme. Only applicable with ZeRO Stages 2 and 3."},
    )
    offload_param_device: str = field(
        default=None,
        metadata={"help": "Possible options are none|cpu|nvme. Only applicable with ZeRO Stage 3."},
    )
    offload_optimizer_nvme_path: str = field(
        default=None,
        metadata={"help": "Possible options are /nvme|/local_nvme. Only applicable with ZeRO Stage 3."},
    )
    offload_param_nvme_path: str = field(
        default=None,
        metadata={"help": "Possible options are /nvme|/local_nvme. Only applicable with ZeRO Stage 3."},
    )
    zero3_init_flag: bool = field(
        default=None,
        metadata={
            "help": "Flag to indicate whether to enable `deepspeed.zero.Init` for constructing massive models. "
            "Only applicable with ZeRO Stage-3."
        },
    )
    zero3_save_16bit_model: bool = field(
        default=None,
        metadata={"help": "Flag to indicate whether to save 16-bit model. Only applicable with ZeRO Stage-3."},
    )
    transformer_moe_cls_names: str = field(
        default=None,
        metadata={
            "help": "comma-separated list of transformers MoE layer class names (case-sensitive), e.g.: "
            "`MixtralSparseMoeBlock`, `Qwen2MoeSparseMoeBlock`, `JetMoEAttention`, `JetMoEBlock`, ..."
        },
    )
    enable_msamp: bool = field(
        default=None,
        metadata={"help": "Flag to indicate whether to enable MS-AMP backend for FP8 training."},
    )
    msamp_opt_level: Optional[Literal["O1", "O2"]] = field(
        default=None,
        metadata={
            "help": "Optimization level for MS-AMP (defaults to 'O1'). Only applicable if `enable_msamp` is True. Should be one of ['O1', 'O2']."
}, ) def __post_init__(self): from .deepspeed import HfDeepSpeedConfig if self.gradient_accumulation_steps is None: gas = os.environ.get("ACCELERATE_GRADIENT_ACCUMULATION_STEPS", "auto") self.gradient_accumulation_steps = int(gas) if gas.isdigit() else gas if self.gradient_clipping is None: gradient_clipping = os.environ.get("ACCELERATE_GRADIENT_CLIPPING", "auto") self.gradient_clipping = gradient_clipping if gradient_clipping == "auto" else float(gradient_clipping) if self.zero_stage is None: self.zero_stage = int(os.environ.get("ACCELERATE_DEEPSPEED_ZERO_STAGE", 2)) if self.offload_optimizer_device is None: self.offload_optimizer_device = os.environ.get("ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE", "none") if self.offload_param_device is None: self.offload_param_device = os.environ.get("ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE", "none") if self.offload_optimizer_nvme_path is None: self.offload_optimizer_nvme_path = os.environ.get( "ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_NVME_PATH", "none" ) if self.offload_param_nvme_path is None: self.offload_param_nvme_path = os.environ.get("ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_NVME_PATH", "none") if self.zero3_save_16bit_model is None: self.zero3_save_16bit_model = ( os.environ.get("ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL", "false") == "true" ) if self.enable_msamp is None: self.enable_msamp = os.environ.get("ACCELERATE_FP8_BACKEND", None) == "MSAMP" if self.msamp_opt_level is None: self.msamp_opt_level = os.environ.get("ACCELERATE_FP8_OPT_LEVEL", "O1") if self.hf_ds_config is None: self.hf_ds_config = os.environ.get("ACCELERATE_DEEPSPEED_CONFIG_FILE", "none") if ( isinstance(self.hf_ds_config, dict) or (isinstance(self.hf_ds_config, str) and self.hf_ds_config != "none") or isinstance(self.hf_ds_config, HfDeepSpeedConfig) ): if not isinstance(self.hf_ds_config, HfDeepSpeedConfig): self.hf_ds_config = HfDeepSpeedConfig(self.hf_ds_config) if "gradient_accumulation_steps" not in self.hf_ds_config.config: self.hf_ds_config.config["gradient_accumulation_steps"] = 1 if "zero_optimization" not in self.hf_ds_config.config: raise ValueError("Please specify the ZeRO optimization config in the DeepSpeed config.") self._deepspeed_config_checks() plugin_to_config_mapping = { "gradient_accumulation_steps": "gradient_accumulation_steps", "gradient_clipping": "gradient_clipping", "zero_stage": "zero_optimization.stage", "offload_optimizer_device": "zero_optimization.offload_optimizer.device", "offload_param_device": "zero_optimization.offload_param.device", "offload_param_nvme_path": "zero_optimization.offload_param.nvme_path", "offload_optimizer_nvme_path": "zero_optimization.offload_optimizer.nvme_path", "zero3_save_16bit_model": "zero_optimization.stage3_gather_16bit_weights_on_model_save", } kwargs = {v: getattr(self, k) for k, v in plugin_to_config_mapping.items() if getattr(self, k) is not None} for key in kwargs.keys(): self.fill_match(key, **kwargs, must_match=False) self.hf_ds_config.set_stage_and_offload() # filling the missing values in the class attributes from the DeepSpeed config # when using the DeepSpeed config file. 
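            # e.g. `zero_stage` is read back from `zero_optimization.stage`, keeping the plugin attributes
            # consistent with the user-provided DeepSpeed config (entries that are missing or left as "auto" are skipped).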
for key, value in plugin_to_config_mapping.items(): config_value = self.hf_ds_config.get_value(value) if config_value is not None and config_value != "auto": setattr(self, key, config_value) else: config = { "train_batch_size": "auto", "train_micro_batch_size_per_gpu": "auto", "gradient_accumulation_steps": self.gradient_accumulation_steps, "zero_optimization": { "stage": self.zero_stage, "offload_optimizer": { "device": self.offload_optimizer_device, "nvme_path": self.offload_optimizer_nvme_path if self.offload_optimizer_device == "nvme" else None, }, "offload_param": { "device": self.offload_param_device, "nvme_path": self.offload_param_nvme_path if self.offload_param_device == "nvme" else None, }, "stage3_gather_16bit_weights_on_model_save": self.zero3_save_16bit_model, }, } if self.gradient_clipping: config["gradient_clipping"] = self.gradient_clipping self.hf_ds_config = HfDeepSpeedConfig(config) self.deepspeed_config = self.hf_ds_config.config self.deepspeed_config["steps_per_print"] = float("inf") # this will stop deepspeed from logging @ stdout if self.zero3_init_flag is None: self.zero3_init_flag = ( str_to_bool(os.environ.get("ACCELERATE_DEEPSPEED_ZERO3_INIT", str(self.hf_ds_config.is_zero3()))) == 1 ) if self.zero3_init_flag and not self.hf_ds_config.is_zero3(): warnings.warn("DeepSpeed Zero3 Init flag is only applicable for ZeRO Stage 3. Setting it to False.") self.zero3_init_flag = False # NOTE: Set to False by default, will be set to `True` automatically if it's the first plugin passed # to the `Accelerator`'s `deepspeed_plugin` param, *or* `AcceleratorState().enable_deepspeed_plugin(plugin_key)` is manually called self._set_selected(False) # Ignore if it's already set if self.enable_msamp and "msamp" not in self.deepspeed_config: if self.zero_stage == 3: raise NotImplementedError( "MS-AMP is not supported for ZeRO Stage 3. Please use ZeRO Stage 0, 1, or 2 instead." ) if self.msamp_opt_level not in ["O1", "O2"]: raise ValueError("Invalid optimization level for MS-AMP. Please use one of ['O1' or'O2'].") self.deepspeed_config["msamp"] = {"enabled": True, "opt_level": self.msamp_opt_level} def fill_match(self, ds_key_long, mismatches=None, must_match=True, **kwargs): mismatches = [] if mismatches is None else mismatches config, ds_key = self.hf_ds_config.find_config_node(ds_key_long) if config is None: return if config.get(ds_key) == "auto": if ds_key_long in kwargs: config[ds_key] = kwargs[ds_key_long] return else: raise ValueError( f"`{ds_key_long}` not found in kwargs. " f"Please specify `{ds_key_long}` without `auto` (set to correct value) in the DeepSpeed config file or " "pass it in kwargs." 
) if not must_match: return ds_val = config.get(ds_key) if ds_val is not None and ds_key_long in kwargs: if ds_val != kwargs[ds_key_long]: mismatches.append(f"- ds {ds_key_long}={ds_val} vs arg {ds_key_long}={kwargs[ds_key_long]}") def is_auto(self, ds_key_long): val = self.hf_ds_config.get_value(ds_key_long) if val is None: return False else: return val == "auto" def get_value(self, ds_key_long, default=None): return self.hf_ds_config.get_value(ds_key_long, default) def deepspeed_config_process(self, prefix="", mismatches=None, config=None, must_match=True, **kwargs): """Process the DeepSpeed config with the values from the kwargs.""" mismatches = [] if mismatches is None else mismatches if config is None: config = self.deepspeed_config for key, value in config.items(): if isinstance(value, dict): self.deepspeed_config_process( prefix=prefix + key + ".", mismatches=mismatches, config=value, must_match=must_match, **kwargs ) else: self.fill_match(prefix + key, mismatches, must_match=must_match, **kwargs) if len(mismatches) > 0 and prefix == "": mismatches_msg = "\n".join(mismatches) raise ValueError( "Please correct the following DeepSpeed config values that mismatch kwargs " f" values:\n{mismatches_msg}\nThe easiest method is to set these DeepSpeed config values to 'auto'." ) def set_mixed_precision(self, mixed_precision): ds_config = self.deepspeed_config kwargs = { "fp16.enabled": mixed_precision == "fp16", # When training in fp8, we still rely on bf16 autocast for the core mixed precision "bf16.enabled": mixed_precision in ("bf16", "fp8"), } if mixed_precision == "fp16": if "fp16" not in ds_config: ds_config["fp16"] = {"enabled": True, "auto_cast": True} elif mixed_precision in ("bf16", "fp8"): if "bf16" not in ds_config: ds_config["bf16"] = {"enabled": True} if mixed_precision == "fp8" and self.enable_msamp: if "msamp" not in ds_config: ds_config["msamp"] = {"enabled": True, "opt_level": self.msamp_opt_level} if mixed_precision != "no": diff_dtype = "bf16" if mixed_precision == "fp16" else "fp16" if str(ds_config.get(diff_dtype, {}).get("enabled", "False")).lower() == "true": raise ValueError( f"`--mixed_precision` arg cannot be set to `{mixed_precision}` when `{diff_dtype}` is set in the DeepSpeed config file." ) for dtype in ["fp16", "bf16"]: if dtype not in ds_config: ds_config[dtype] = {"enabled": False} self.fill_match("fp16.enabled", must_match=False, **kwargs) self.fill_match("bf16.enabled", must_match=False, **kwargs) def set_deepspeed_weakref(self): from .imports import is_transformers_available ds_config = copy.deepcopy(self.deepspeed_config) if self.zero3_init_flag: if not is_transformers_available(): raise Exception( "When `zero3_init_flag` is set, it requires Transformers to be installed. " "Please run `pip install transformers`." 
) if "gradient_accumulation_steps" not in ds_config or ds_config["gradient_accumulation_steps"] == "auto": ds_config["gradient_accumulation_steps"] = 1 if "train_micro_batch_size_per_gpu" not in ds_config or ds_config["train_micro_batch_size_per_gpu"] == "auto": ds_config["train_micro_batch_size_per_gpu"] = 1 if ds_config.get("train_batch_size", None) == "auto": del ds_config["train_batch_size"] if compare_versions("transformers", "<", "4.46"): from transformers.deepspeed import HfDeepSpeedConfig, unset_hf_deepspeed_config else: from transformers.integrations import HfDeepSpeedConfig, unset_hf_deepspeed_config unset_hf_deepspeed_config() self.dschf = HfDeepSpeedConfig(ds_config) # keep this object alive # noqa def is_zero3_init_enabled(self): return self.zero3_init_flag @contextmanager def zero3_init_context_manager(self, enable=False): old = self.zero3_init_flag if old == enable: yield else: self.zero3_init_flag = enable self.dschf = None self.set_deepspeed_weakref() yield self.zero3_init_flag = old self.dschf = None self.set_deepspeed_weakref() def _deepspeed_config_checks(self): env_variable_names_to_ignore = [ "ACCELERATE_GRADIENT_ACCUMULATION_STEPS", "ACCELERATE_GRADIENT_CLIPPING", "ACCELERATE_DEEPSPEED_ZERO_STAGE", "ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE", "ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE", "ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_NVME_PATH", "ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_NVME_PATH", "ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL", "ACCELERATE_MIXED_PRECISION", ] env_variable_names_to_ignore = [ name.replace("ACCELERATE_", "").replace("DEEPSPEED_", "").lower() for name in env_variable_names_to_ignore ] deepspeed_fields_from_accelerate_config = os.environ.get("ACCELERATE_CONFIG_DS_FIELDS", "").split(",") if any(name in env_variable_names_to_ignore for name in deepspeed_fields_from_accelerate_config): raise ValueError( f"When using `deepspeed_config_file`, the following accelerate config variables will be ignored: {env_variable_names_to_ignore}.\n" "Please specify them appropriately in the DeepSpeed config file.\n" "If you are using an accelerate config file, remove others config variables mentioned in the above specified list.\n" "The easiest method is to create a new config following the questionnaire via `accelerate config`.\n" "It will only ask for the necessary config variables when using `deepspeed_config_file`." ) def set_moe_leaf_modules(self, model): if self.transformer_moe_cls_names is None: self.transformer_moe_cls_names = os.environ.get("ACCELERATE_DEEPSPEED_MOE_LAYER_CLS_NAMES", None) if self.transformer_moe_cls_names is not None: if compare_versions("deepspeed", "<", "0.14.0"): raise ImportError("DeepSpeed version must be >= 0.14.0 to use MOE support. Please update DeepSpeed.") from deepspeed.utils import set_z3_leaf_modules class_names = self.transformer_moe_cls_names.split(",") transformer_moe_cls = [] for layer_class in class_names: transformer_cls = get_module_class_from_name(model, layer_class) if transformer_cls is None: raise Exception( f"Could not find a transformer layer class called '{layer_class}' to wrap in the model." ) else: transformer_moe_cls.append(transformer_cls) set_z3_leaf_modules(model, transformer_moe_cls) # z3_leaf def select(self, _from_accelerator_state: bool = False): """ Sets the HfDeepSpeedWeakref to use the current deepspeed plugin configuration """ if not _from_accelerator_state: raise ValueError( "A `DeepSpeedPlugin` object must be enabled manually by calling `AcceleratorState().enable_deepspeed_plugin(plugin_key)`." 
) self.set_deepspeed_weakref() self._set_selected(True) def _unselect(self): self._set_selected(False) def _set_selected(self, value: bool): """ Private setter for the 'enabled' attribute. """ self._selected = value @property def selected(self): return self._selected @selected.setter def selected(self, value): raise NotImplementedError( "'enabled' can only be set through calling 'AcceleratorState().enable_deepspeed_plugin(key)'." ) @dataclass class FullyShardedDataParallelPlugin: """ This plugin is used to enable fully sharded data parallelism. Args: sharding_strategy (`Union[str, torch.distributed.fsdp.ShardingStrategy]`, defaults to `'FULL_SHARD'`): Sharding strategy to use. Should be either a `str` or an instance of `torch.distributed.fsdp.fully_sharded_data_parallel.ShardingStrategy`. backward_prefetch (`Union[str, torch.distributed.fsdp.BackwardPrefetch]`, defaults to `'NO_PREFETCH'`): Backward prefetch strategy to use. Should be either a `str` or an instance of `torch.distributed.fsdp.fully_sharded_data_parallel.BackwardPrefetch`. mixed_precision_policy (`Optional[Union[dict, torch.distributed.fsdp.MixedPrecision]]`, defaults to `None`): A config to enable mixed precision training with FullyShardedDataParallel. If passing in a `dict`, it should have the following keys: `param_dtype`, `reduce_dtype`, and `buffer_dtype`. auto_wrap_policy (`Optional(Union[Callable, Literal["transformer_based_wrap", "size_based_wrap", "no_wrap"]]), defaults to `NO_WRAP`): A callable or string specifying a policy to recursively wrap layers with FSDP. If a string, it must be one of `transformer_based_wrap`, `size_based_wrap`, or `no_wrap`. See `torch.distributed.fsdp.wrap.size_based_wrap_policy` for a direction on what it should look like. cpu_offload (`Union[bool, torch.distributed.fsdp.CPUOffload]`, defaults to `False`): Whether to offload parameters to CPU. Should be either a `bool` or an instance of `torch.distributed.fsdp.fully_sharded_data_parallel.CPUOffload`. ignored_modules (`Optional[Iterable[torch.nn.Module]]`, defaults to `None`): A list of modules to ignore when wrapping with FSDP. state_dict_type (`Union[str, torch.distributed.fsdp.StateDictType]`, defaults to `'FULL_STATE_DICT'`): State dict type to use. If a string, it must be one of `full_state_dict`, `local_state_dict`, or `sharded_state_dict`. state_dict_config (`Optional[Union[torch.distributed.fsdp.FullStateDictConfig, torch.distributed.fsdp.ShardedStateDictConfig]`, defaults to `None`): State dict config to use. Is determined based on the `state_dict_type` if not passed in. optim_state_dict_config (`Optional[Union[torch.distributed.fsdp.FullOptimStateDictConfig, torch.distributed.fsdp.ShardedOptimStateDictConfig]`, defaults to `None`): Optim state dict config to use. Is determined based on the `state_dict_type` if not passed in. limit_all_gathers (`bool`, defaults to `True`): Whether to have FSDP explicitly synchronizes the CPU thread to prevent too many in-flight all-gathers. This bool only affects the sharded strategies that schedule all-gathers. Enabling this can help lower the number of CUDA malloc retries. use_orig_params (`bool`, defaults to `False`): Whether to use the original parameters for the optimizer. param_init_fn (`Optional[Callable[[torch.nn.Module], None]`, defaults to `None`): A `Callable[torch.nn.Module] -> None` that specifies how modules that are currently on the meta device should be initialized onto an actual device. Only applicable when `sync_module_states` is `True`. 
By default is a `lambda` which calls `to_empty` on the module. sync_module_states (`bool`, defaults to `False`): Whether each individually wrapped FSDP unit should broadcast module parameters from rank 0 to ensure they are the same across all ranks after initialization. Defaults to `False` unless `cpu_ram_efficient_loading` is `True`, then will be forcibly enabled. forward_prefetch (`bool`, defaults to `False`): Whether to have FSDP explicitly prefetches the next upcoming all-gather while executing in the forward pass. only use with Static graphs. activation_checkpointing (`bool`, defaults to `False`): A technique to reduce memory usage by clearing activations of certain layers and recomputing them during a backward pass. Effectively, this trades extra computation time for reduced memory usage. cpu_ram_efficient_loading (`bool`, defaults to `None`): If True, only the first process loads the pretrained model checkoint while all other processes have empty weights. Only applicable for Transformers. When using this, `sync_module_states` needs to be `True`. transformer_cls_names_to_wrap (`Optional[List[str]]`, defaults to `None`): A list of transformer layer class names to wrap. Only applicable when `auto_wrap_policy` is `transformer_based_wrap`. min_num_params (`Optional[int]`, defaults to `None`): The minimum number of parameters a module must have to be wrapped. Only applicable when `auto_wrap_policy` is `size_based_wrap`. """ sharding_strategy: Union[str, "torch.distributed.fsdp.ShardingStrategy"] = field( default=None, metadata={ "help": "Sharding strategy to use. Should be either a `str` or an instance of `torch.distributed.fsdp.fully_sharded_data_parallel.ShardingStrategy`. Defaults to 'FULL_SHARD'" }, ) backward_prefetch: Union[str, "torch.distributed.fsdp.BackwardPrefetch"] = field( default=None, metadata={ "help": "Backward prefetch strategy to use. Should be either a `str` or an instance of `torch.distributed.fsdp.fully_sharded_data_parallel.BackwardPrefetch`. Defaults to 'NO_PREFETCH'" }, ) mixed_precision_policy: Optional[Union[dict, "torch.distributed.fsdp.MixedPrecision"]] = field( default=None, metadata={ "help": "A config to enable mixed precision training with FullyShardedDataParallel. " "If passing in a `dict`, it should have the following keys: `param_dtype`, `reduce_dtype`, and `buffer_dtype`." }, ) auto_wrap_policy: Optional[Union[Callable, Literal["transformer_based_wrap", "size_based_wrap", "no_wrap"]]] = ( field( default=None, metadata={ "help": "A callable or string specifying a policy to recursively wrap layers with FSDP. If a string, it must be one of `transformer_based_wrap`, `size_based_wrap`, or `no_wrap`. " "Defaults to `NO_WRAP`. See `torch.distributed.fsdp.wrap.size_based_wrap_policy` for a direction on what it should look like" }, ) ) cpu_offload: Union[bool, "torch.distributed.fsdp.CPUOffload"] = field( default=None, metadata={ "help": "Whether to offload parameters to CPU. Should be either a `bool` or an instance of `torch.distributed.fsdp.fully_sharded_data_parallel.CPUOffload`. Defaults to `False`" }, ) ignored_modules: Optional[Iterable[torch.nn.Module]] = field( default=None, metadata={"help": "A list of modules to ignore when wrapping with FSDP."}, ) state_dict_type: Union[str, "torch.distributed.fsdp.StateDictType"] = field( default=None, metadata={ "help": "State dict type to use. If a string, it must be one of `full_state_dict`, `local_state_dict`, or `sharded_state_dict`. 
Defaults to `FULL_STATE_DICT`" }, ) state_dict_config: Optional[ Union[ "torch.distributed.fsdp.FullStateDictConfig", "torch.distributed.fsdp.ShardedStateDictConfig", ] ] = field( default=None, metadata={"help": "State dict config to use. Is determined based on the `state_dict_type` if not passed in."}, ) optim_state_dict_config: Optional[ Union["torch.distributed.fsdp.FullOptimStateDictConfig", "torch.distributed.fsdp.ShardedOptimStateDictConfig"] ] = field( default=None, metadata={ "help": "Optim state dict config to use. Is determined based on the `state_dict_type` if not passed in." }, ) limit_all_gathers: bool = field( default=True, metadata={ "help": "Whether to have FSDP explicitly synchronizes the CPU thread to prevent " "too many in-flight all-gathers. This bool only affects the sharded strategies that schedule all-gathers. " "Enabling this can help lower the number of CUDA malloc retries." }, ) use_orig_params: bool = field( default=None, metadata={"help": "Whether to use the original parameters for the optimizer. Defaults to `False`"}, ) param_init_fn: Optional[Callable[[torch.nn.Module], None]] = field( default=None, metadata={ "help": "A Callable[torch.nn.Module] -> None that specifies how modules " "that are currently on the meta device should be initialized onto an actual device. " "Only applicable when `sync_module_states` is `True`. By default is a `lambda` which calls `to_empty` on the module." }, ) sync_module_states: bool = field( default=None, metadata={ "help": "Whether each individually wrapped FSDP unit should broadcast module parameters from rank 0 " "to ensure they are the same across all ranks after initialization. Defaults to `False` unless " "`cpu_ram_efficient_loading` is `True`, then will be forcibly enabled." }, ) forward_prefetch: bool = field( default=None, metadata={ "help": "Whether to have FSDP explicitly prefetches the next upcoming " "all-gather while executing in the forward pass. only use with Static graphs. Defaults to `False`" }, ) activation_checkpointing: bool = field( default=None, metadata={ "help": "A technique to reduce memory usage by clearing activations of " "certain layers and recomputing them during a backward pass. Effectively, this trades extra computation time " "for reduced memory usage. Defaults to `False`" }, ) cpu_ram_efficient_loading: bool = field( default=None, metadata={ "help": "If True, only the first process loads the pretrained model checkoint while all other processes have empty weights. " "Only applicable for 🤗 Transformers. When using this, `sync_module_states` needs to be `True`. Defaults to `False`." }, ) transformer_cls_names_to_wrap: Optional[List[str]] = field( default=None, metadata={ "help": "A list of transformer layer class names to wrap. Only applicable when `auto_wrap_policy` is `transformer_based_wrap`." }, ) min_num_params: Optional[int] = field( default=None, metadata={ "help": "The minimum number of parameters a module must have to be wrapped. Only applicable when `auto_wrap_policy` is `size_based_wrap`." 
}, ) def __post_init__(self): from torch.distributed.fsdp import ( BackwardPrefetch, CPUOffload, ShardingStrategy, ) env_prefix = "FSDP_" # Strategy: By default we should always assume that values are passed in, else we check the environment variables if self.sharding_strategy is None: self.sharding_strategy = os.environ.get(env_prefix + "SHARDING_STRATEGY", "FULL_SHARD") if isinstance(self.sharding_strategy, str): # We need to remap based on custom enum values for user readability if self.sharding_strategy.upper() in FSDP_SHARDING_STRATEGY: self.sharding_strategy = FSDP_SHARDING_STRATEGY.index(self.sharding_strategy.upper()) + 1 if isinstance(self.sharding_strategy, int) or self.sharding_strategy.isdigit(): self.sharding_strategy = ShardingStrategy(int(self.sharding_strategy)) else: self.sharding_strategy = ShardingStrategy[self.sharding_strategy.upper()] if self.cpu_offload is None: self.cpu_offload = str_to_bool(os.environ.get(env_prefix + "OFFLOAD_PARAMS", "False")) == 1 if isinstance(self.cpu_offload, bool): self.cpu_offload = CPUOffload(offload_params=self.cpu_offload) if self.backward_prefetch is None: self.backward_prefetch = os.environ.get(env_prefix + "BACKWARD_PREFETCH", None) if isinstance(self.backward_prefetch, str) and self.backward_prefetch.upper() == "NO_PREFETCH": self.backward_prefetch = None if self.backward_prefetch is not None and not isinstance(self.backward_prefetch, BackwardPrefetch): if isinstance(self.backward_prefetch, str) and self.backward_prefetch.upper() in FSDP_BACKWARD_PREFETCH: self.backward_prefetch = FSDP_BACKWARD_PREFETCH.index(self.backward_prefetch.upper()) + 1 if isinstance(self.backward_prefetch, int) or self.backward_prefetch.isdigit(): self.backward_prefetch = BackwardPrefetch(int(self.backward_prefetch)) else: self.backward_prefetch = BackwardPrefetch[self.backward_prefetch.upper()] self.set_state_dict_type() if self.auto_wrap_policy is None: self.auto_wrap_policy = os.environ.get(env_prefix + "AUTO_WRAP_POLICY", "NO_WRAP") if isinstance(self.auto_wrap_policy, str): if self.auto_wrap_policy.upper() not in FSDP_AUTO_WRAP_POLICY: raise ValueError( f"Invalid auto wrap policy: {self.auto_wrap_policy}. Must be one of {list(FSDP_AUTO_WRAP_POLICY.keys())}" ) from torch.distributed.fsdp.wrap import size_based_auto_wrap_policy, transformer_auto_wrap_policy if self.auto_wrap_policy.upper() == "TRANSFORMER_BASED_WRAP": self.auto_wrap_policy = transformer_auto_wrap_policy if self.transformer_cls_names_to_wrap is None: self.transformer_cls_names_to_wrap = os.environ.get(env_prefix + "TRANSFORMER_CLS_TO_WRAP", None) if isinstance(self.transformer_cls_names_to_wrap, str): self.transformer_cls_names_to_wrap = self.transformer_cls_names_to_wrap.split(",") elif self.auto_wrap_policy.upper() == "SIZE_BASED_WRAP": self.auto_wrap_policy = size_based_auto_wrap_policy if self.min_num_params is None: self.min_num_params = int(os.environ.get(env_prefix + "MIN_NUM_PARAMS", 0)) elif not isinstance(self.min_num_params, int): raise ValueError( f"`min_num_params` must be an integer. 
Got {self.min_num_params} of type {type(self.min_num_params)}" ) elif self.auto_wrap_policy.upper() == "NO_WRAP": self.auto_wrap_policy = None if self.use_orig_params is None: self.use_orig_params = str_to_bool(os.environ.get(env_prefix + "USE_ORIG_PARAMS", "False")) == 1 if self.sync_module_states is None: self.sync_module_states = str_to_bool(os.environ.get(env_prefix + "SYNC_MODULE_STATES", "False")) == 1 if self.forward_prefetch is None: self.forward_prefetch = str_to_bool(os.environ.get(env_prefix + "FORWARD_PREFETCH", "False")) == 1 if self.activation_checkpointing is None: self.activation_checkpointing = ( str_to_bool(os.environ.get(env_prefix + "ACTIVATION_CHECKPOINTING", "False")) == 1 ) if self.cpu_ram_efficient_loading is None: self.cpu_ram_efficient_loading = ( str_to_bool(os.environ.get(env_prefix + "CPU_RAM_EFFICIENT_LOADING", "False")) == 1 ) if self.cpu_ram_efficient_loading and not self.sync_module_states: warnings.warn( "sync_module_states cannot be False since efficient cpu ram loading enabled. " "Setting sync_module_states to True." ) self.sync_module_states = True if isinstance(self.mixed_precision_policy, dict): self.set_mixed_precision(self.mixed_precision_policy) if self.sync_module_states: if is_npu_available(): device = torch.npu.current_device() elif is_mlu_available(): device = torch.mlu.current_device() elif is_musa_available(): device = torch.musa.current_device() elif is_cuda_available(): device = torch.cuda.current_device() elif is_xpu_available(): device = torch.xpu.current_device() else: raise RuntimeError( "There are currently no available devices found, must be one of 'XPU', 'CUDA', or 'NPU'." ) # Create a function that will be used to initialize the parameters of the model # when using `sync_module_states` self.param_init_fn = lambda x: x.to_empty(device=device, recurse=False) def set_state_dict_type(self, state_dict_type=None): """ Set the state dict config based on the `StateDictType`. 
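
        Example (a minimal sketch; `fsdp_plugin` is assumed to be an existing `FullyShardedDataParallelPlugin` instance):

        ```python
        # Train with sharded state dicts, then switch to a full state dict for the final save.
        fsdp_plugin.set_state_dict_type("full_state_dict")
        ```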
""" from torch.distributed.fsdp.fully_sharded_data_parallel import ( FullOptimStateDictConfig, FullStateDictConfig, ShardedOptimStateDictConfig, ShardedStateDictConfig, StateDictType, ) # Override the state_dict_type if provided, typical use case: # user trains with sharded, but final save is with full if state_dict_type is not None: self.state_dict_type = state_dict_type if self.state_dict_type is None: self.state_dict_type = os.environ.get("FSDP_STATE_DICT_TYPE", "FULL_STATE_DICT") if isinstance(self.state_dict_type, str): if self.state_dict_type.isdigit(): self.state_dict_type = StateDictType(int(self.state_dict_type)) else: self.state_dict_type = StateDictType[self.state_dict_type.upper()] if self.state_dict_type == StateDictType.FULL_STATE_DICT: if self.state_dict_config is None: self.state_dict_config = FullStateDictConfig(offload_to_cpu=True, rank0_only=True) if self.optim_state_dict_config is None: self.optim_state_dict_config = FullOptimStateDictConfig(offload_to_cpu=True, rank0_only=True) elif self.state_dict_type == StateDictType.SHARDED_STATE_DICT: if self.state_dict_config is None: self.state_dict_config = ShardedStateDictConfig(offload_to_cpu=True) if self.optim_state_dict_config is None: self.optim_state_dict_config = ShardedOptimStateDictConfig(offload_to_cpu=True) def set_auto_wrap_policy(self, model): """ Given `model`, creates an `auto_wrap_policy` baesd on the passed in policy and if we can use the `transformer_cls_to_wrap` """ from torch.distributed.fsdp.wrap import size_based_auto_wrap_policy, transformer_auto_wrap_policy # First base off of `_no_split_modules` no_split_modules = getattr(model, "_no_split_modules", None) default_transformer_cls_names_to_wrap = list(no_split_modules) if no_split_modules is not None else [] if self.auto_wrap_policy == transformer_auto_wrap_policy: if self.transformer_cls_names_to_wrap is None: self.transformer_cls_names_to_wrap = default_transformer_cls_names_to_wrap transformer_cls_to_wrap = set() for layer_class in self.transformer_cls_names_to_wrap: transformer_cls = get_module_class_from_name(model, layer_class) if transformer_cls is None: raise ValueError(f"Could not find the transformer layer class {layer_class} in the model.") transformer_cls_to_wrap.add(transformer_cls) # Finally we set the auto_wrap_policy to a callable self.auto_wrap_policy = functools.partial( self.auto_wrap_policy, transformer_layer_cls=transformer_cls_to_wrap ) elif self.auto_wrap_policy == size_based_auto_wrap_policy: # If zero, we silently ignore it. if self.min_num_params > 0: self.auto_wrap_policy = functools.partial(self.auto_wrap_policy, min_num_params=self.min_num_params) else: self.auto_wrap_policy = None def set_mixed_precision(self, mixed_precision, buffer_autocast=False, override=False): "Sets the mixed precision policy for FSDP" mixed_precision_mapping = { "fp8": torch.bfloat16, "fp16": torch.float16, "bf16": torch.bfloat16, "fp32": torch.float32, } dtype = mixed_precision if isinstance(mixed_precision, str): dtype = mixed_precision_mapping.get(mixed_precision, None) if dtype is None: raise ValueError( f"Invalid mixed precision: {mixed_precision}. Must be one of {list(mixed_precision_mapping.keys())}" ) elif isinstance(mixed_precision, torch.dtype) and mixed_precision not in mixed_precision_mapping.values(): raise ValueError( f"Invalid mixed precision: {mixed_precision}. 
Must be one of {list(mixed_precision_mapping.values())}"
            )
        buffer_type = torch.float32 if buffer_autocast else dtype

        from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision

        if override or self.mixed_precision_policy is None:
            self.mixed_precision_policy = MixedPrecision(
                param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=buffer_type
            )
        elif isinstance(self.mixed_precision_policy, dict):
            # Check for incompatible types
            missing_keys = [
                k for k in ["param_dtype", "reduce_dtype", "buffer_dtype"] if k not in self.mixed_precision_policy
            ]
            invalid_values = [
                k for k, v in self.mixed_precision_policy.items() if v not in mixed_precision_mapping.values()
            ]
            if missing_keys or invalid_values:
                raise ValueError(
                    f"Invalid mixed precision policy: {self.mixed_precision_policy}. "
                    f"Must be a `dict` with keys `param_dtype`, `reduce_dtype`, and `buffer_dtype`. "
                    f"Values must be one of {list(mixed_precision_mapping.values())}"
                )
            self.mixed_precision_policy = MixedPrecision(**self.mixed_precision_policy)


@dataclass
class TorchTensorParallelPlugin:
    """
    This plugin is used to enable tensor parallelism using PyTorch >= 2.0.
    """

    tp_size: int = field(
        default=1,
        metadata={"help": "tensor parallel size will be used in the device mesh preparation"},
    )

    # torch_device_mesh is of type "torch.distributed.DeviceMesh"
    torch_device_mesh: Optional["torch.distributed.DeviceMesh"] = field(default=None)

    def __post_init__(self):
        self.tp_size = self.tp_size if os.environ.get("TP_SIZE", "1") == "1" else int(os.environ.get("TP_SIZE", "1"))
        if self.tp_size == 1:
            raise ValueError("Provide TP degree > 1.")

        if is_torch_version("<", BETA_TP_AVAILABLE_PYTORCH_VERSION):
            raise ValueError(
                f"Minimum PyTorch version {BETA_TP_AVAILABLE_PYTORCH_VERSION} needed to use tensor parallel."
            )

        from torch.distributed.device_mesh import init_device_mesh

        mesh_dim_name = "tp"
        device = "cuda"  # support for other devices has to be investigated

        self.torch_device_mesh = init_device_mesh(device, (self.tp_size,), mesh_dim_names=(mesh_dim_name,))


@dataclass
class MegatronLMPlugin:
    """
    Plugin for Megatron-LM to enable tensor, pipeline, sequence and data parallelism. Also to enable selective
    activation recomputation and optimized fused kernels.

    Args:
        tp_degree (`int`, defaults to `None`):
            Tensor parallelism degree.
        pp_degree (`int`, defaults to `None`):
            Pipeline parallelism degree.
        num_micro_batches (`int`, defaults to `None`):
            Number of micro-batches.
        gradient_clipping (`float`, defaults to `None`):
            Gradient clipping value based on global L2 Norm (0 to disable).
        sequence_parallelism (`bool`, defaults to `None`):
            Enable sequence parallelism.
        recompute_activations (`bool`, defaults to `None`):
            Enable selective activation recomputation.
        use_distributed_optimizer (`bool`, defaults to `None`):
            Enable distributed optimizer.
        pipeline_model_parallel_split_rank (`int`, defaults to `None`):
            Rank where encoder and decoder should be split.
        num_layers_per_virtual_pipeline_stage (`int`, defaults to `None`):
            Number of layers per virtual pipeline stage.
        is_train_batch_min (`str`, defaults to `True`):
            If both train & eval dataloaders are specified, this will decide the `micro_batch_size`.
        train_iters (`int`, defaults to `None`):
            Total number of iterations to train over all training runs. Note that either train-iters or train-samples
            should be provided when using `MegatronLMDummyScheduler`.
        train_samples (`int`, defaults to `None`):
            Total number of samples to train over all training runs.
            Note that either train-iters or train-samples should be provided when using `MegatronLMDummyScheduler`.
        weight_decay_incr_style (`str`, defaults to `'constant'`):
            Weight decay increment function. choices=["constant", "linear", "cosine"].
        start_weight_decay (`float`, defaults to `None`):
            Initial weight decay coefficient for L2 regularization.
        end_weight_decay (`float`, defaults to `None`):
            End of run weight decay coefficient for L2 regularization.
        lr_decay_style (`str`, defaults to `'linear'`):
            Learning rate decay function. choices=['constant', 'linear', 'cosine'].
        lr_decay_iters (`int`, defaults to `None`):
            Number of iterations for learning rate decay. If None defaults to `train_iters`.
        lr_decay_samples (`int`, defaults to `None`):
            Number of samples for learning rate decay. If None defaults to `train_samples`.
        lr_warmup_iters (`int`, defaults to `None`):
            Number of iterations to linearly warmup learning rate over.
        lr_warmup_samples (`int`, defaults to `None`):
            Number of samples to linearly warmup learning rate over.
        lr_warmup_fraction (`float`, defaults to `None`):
            Fraction of lr-warmup-(iters/samples) to linearly warmup learning rate over.
        min_lr (`float`, defaults to `0`):
            Minimum value for learning rate. The scheduler clips values below this threshold.
        consumed_samples (`List`, defaults to `None`):
            Number of samples consumed in the same order as the dataloaders to `accelerator.prepare` call.
        no_wd_decay_cond (`Optional`, defaults to `None`):
            Condition to disable weight decay.
        scale_lr_cond (`Optional`, defaults to `None`):
            Condition to scale learning rate.
        lr_mult (`float`, defaults to `1.0`):
            Learning rate multiplier.
        megatron_dataset_flag (`bool`, defaults to `False`):
            Whether the format of dataset follows Megatron-LM Indexed/Cached/MemoryMapped format.
        seq_length (`int`, defaults to `None`):
            Maximum sequence length to process.
        encoder_seq_length (`int`, defaults to `None`):
            Maximum sequence length to process for the encoder.
        decoder_seq_length (`int`, defaults to `None`):
            Maximum sequence length to process for the decoder.
        tensorboard_dir (`str`, defaults to `None`):
            Path to save tensorboard logs.
        set_all_logging_options (`bool`, defaults to `False`):
            Whether to set all logging options.
        eval_iters (`int`, defaults to `100`):
            Number of iterations to run for evaluation on the validation/test set.
        eval_interval (`int`, defaults to `1000`):
            Interval between running evaluation on validation set.
        return_logits (`bool`, defaults to `False`):
            Whether to return logits from the model.
        custom_train_step_class (`Optional`, defaults to `None`):
            Custom train step class.
        custom_train_step_kwargs (`Optional`, defaults to `None`):
            Custom train step kwargs.
        custom_model_provider_function (`Optional`, defaults to `None`):
            Custom model provider function.
        custom_prepare_model_function (`Optional`, defaults to `None`):
            Custom prepare model function.
        custom_megatron_datasets_provider_function (`Optional`, defaults to `None`):
            Custom megatron train_valid_test datasets provider function.
        custom_get_batch_function (`Optional`, defaults to `None`):
            Custom get batch function.
        custom_loss_function (`Optional`, defaults to `None`):
            Custom loss function.
        other_megatron_args (`Optional`, defaults to `None`):
            Other Megatron-LM arguments. Please refer to Megatron-LM.
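
    Example (a minimal sketch; the parallelism degrees below are illustrative, Megatron-LM itself must be installed
    and configured separately, and `Accelerator` is assumed to accept a `megatron_lm_plugin` argument as in the
    `GradientAccumulationPlugin` example above):

    ```python
    from accelerate import Accelerator
    from accelerate.utils import MegatronLMPlugin

    megatron_lm_plugin = MegatronLMPlugin(tp_degree=2, pp_degree=2, num_micro_batches=2, gradient_clipping=1.0)
    accelerator = Accelerator(megatron_lm_plugin=megatron_lm_plugin)
    ```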
""" tp_degree: int = field(default=None, metadata={"help": "tensor parallelism degree."}) pp_degree: int = field(default=None, metadata={"help": "pipeline parallelism degree."}) num_micro_batches: int = field(default=None, metadata={"help": "number of micro-batches."}) gradient_clipping: float = field( default=None, metadata={"help": "gradient clipping value based on global L2 Norm (0 to disable)"} ) sequence_parallelism: bool = field( default=None, metadata={"help": "enable sequence parallelism"}, ) recompute_activations: bool = field( default=None, metadata={"help": "enable selective activation recomputation"}, ) use_distributed_optimizer: bool = field( default=None, metadata={"help": "enable distributed optimizer"}, ) pipeline_model_parallel_split_rank: int = field( default=None, metadata={"help": "Rank where encoder and decoder should be split."} ) num_layers_per_virtual_pipeline_stage: int = field( default=None, metadata={"help": "Number of layers per virtual pipeline stage."} ) is_train_batch_min: str = field( default=True, metadata={"help": "If both train & eval dataloaders are specified, this will decide the micro_batch_size"}, ) train_iters: int = field( default=None, metadata={ "help": "Total number of iterations to train over all training runs. " "Note that either train-iters or train-samples should be provided when using `MegatronLMDummyScheduler`" }, ) train_samples: int = field( default=None, metadata={ "help": "Total number of samples to train over all training runs. " "Note that either train-iters or train-samples should be provided when using `MegatronLMDummyScheduler`" }, ) weight_decay_incr_style: str = field( default="constant", metadata={"help": 'Weight decay increment function. choices=["constant", "linear", "cosine"]. '}, ) start_weight_decay: float = field( default=None, metadata={"help": "Initial weight decay coefficient for L2 regularization."}, ) end_weight_decay: float = field( default=None, metadata={"help": "End of run weight decay coefficient for L2 regularization."}, ) lr_decay_style: str = field( default="linear", metadata={"help": "Learning rate decay function. choices=['constant', 'linear', 'cosine']."}, ) lr_decay_iters: int = field( default=None, metadata={"help": "Number of iterations for learning rate decay. If None defaults to `train_iters`."}, ) lr_decay_samples: int = field( default=None, metadata={"help": "Number of samples for learning rate decay. If None defaults to `train_samples`."}, ) lr_warmup_iters: int = field( default=None, metadata={"help": "number of iterations to linearly warmup learning rate over."}, ) lr_warmup_samples: int = field( default=None, metadata={"help": "number of samples to linearly warmup learning rate over."}, ) lr_warmup_fraction: float = field( default=None, metadata={"help": "fraction of lr-warmup-(iters/samples) to linearly warmup learning rate over."}, ) min_lr: float = field( default=0, metadata={"help": "Minumum value for learning rate. The scheduler clip values below this threshold."}, ) consumed_samples: List[int] = field( default=None, metadata={ "help": "Number of samples consumed in the same order as the dataloaders to `accelerator.prepare` call." 
}, ) no_wd_decay_cond: Optional[Callable] = field(default=None, metadata={"help": "Condition to disable weight decay."}) scale_lr_cond: Optional[Callable] = field(default=None, metadata={"help": "Condition to scale learning rate."}) lr_mult: float = field(default=1.0, metadata={"help": "Learning rate multiplier."}) megatron_dataset_flag: bool = field( default=False, metadata={"help": "Whether the format of dataset follows Megatron-LM Indexed/Cached/MemoryMapped format."}, ) seq_length: int = field( default=None, metadata={"help": "Maximum sequence length to process."}, ) encoder_seq_length: int = field( default=None, metadata={"help": "Maximum sequence length to process for the encoder."}, ) decoder_seq_length: int = field( default=None, metadata={"help": "Maximum sequence length to process for the decoder."}, ) tensorboard_dir: str = field( default=None, metadata={"help": "Path to save tensorboard logs."}, ) set_all_logging_options: bool = field( default=False, metadata={"help": "Whether to set all logging options."}, ) eval_iters: int = field( default=100, metadata={"help": "Number of iterations to run for evaluation validation/test for."} ) eval_interval: int = field( default=1000, metadata={"help": "Interval between running evaluation on validation set."} ) return_logits: bool = field( default=False, metadata={"help": "Whether to return logits from the model."}, ) # custom train step args custom_train_step_class: Optional[Any] = field( default=None, metadata={"help": "Custom train step class."}, ) custom_train_step_kwargs: Optional[Dict[str, Any]] = field( default=None, metadata={"help": "Custom train step kwargs."}, ) # custom model args custom_model_provider_function: Optional[Callable] = field( default=None, metadata={"help": "Custom model provider function."}, ) custom_prepare_model_function: Optional[Callable] = field( default=None, metadata={"help": "Custom prepare model function."}, ) custom_megatron_datasets_provider_function: Optional[Callable] = field( default=None, metadata={"help": "Custom megatron train_valid_test datasets provider function."}, ) custom_get_batch_function: Optional[Callable] = field( default=None, metadata={"help": "Custom get batch function."}, ) custom_loss_function: Optional[Callable] = field( default=None, metadata={"help": "Custom loss function."}, ) # remaining args such as enabling Alibi/ROPE positional embeddings, # wandb logging, Multi-Query Attention, etc. other_megatron_args: Optional[Dict[str, Any]] = field( default=None, metadata={"help": "Other Megatron-LM arguments. 
Please refer Megatron-LM"}, ) def __post_init__(self): prefix = "MEGATRON_LM_" if self.tp_degree is None: self.tp_degree = int(os.environ.get(prefix + "TP_DEGREE", 1)) if self.pp_degree is None: self.pp_degree = int(os.environ.get(prefix + "PP_DEGREE", 1)) if self.num_micro_batches is None: self.num_micro_batches = int(os.environ.get(prefix + "NUM_MICRO_BATCHES", 1)) if self.gradient_clipping is None: self.gradient_clipping = float(os.environ.get(prefix + "GRADIENT_CLIPPING", 1.0)) if self.recompute_activations is None: self.recompute_activations = str_to_bool(os.environ.get(prefix + "RECOMPUTE_ACTIVATIONS", "False")) == 1 if self.use_distributed_optimizer is None: self.use_distributed_optimizer = ( str_to_bool(os.environ.get(prefix + "USE_DISTRIBUTED_OPTIMIZER", "False")) == 1 ) if self.sequence_parallelism is None: self.sequence_parallelism = str_to_bool(os.environ.get(prefix + "SEQUENCE_PARALLELISM", "False")) == 1 if self.pp_degree > 1 or self.use_distributed_optimizer: self.DDP_impl = "local" else: self.DDP_impl = "torch" if self.consumed_samples is not None: if len(self.consumed_samples) == 1: self.consumed_samples.extend([0, 0]) elif len(self.consumed_samples) == 2: self.consumed_samples.append(0) self.megatron_lm_default_args = { "tensor_model_parallel_size": self.tp_degree, "pipeline_model_parallel_size": self.pp_degree, "pipeline_model_parallel_split_rank": self.pipeline_model_parallel_split_rank, "num_layers_per_virtual_pipeline_stage": self.num_layers_per_virtual_pipeline_stage, "DDP_impl": self.DDP_impl, "use_distributed_optimizer": self.use_distributed_optimizer, "sequence_parallel": self.sequence_parallelism, "clip_grad": self.gradient_clipping, "num_micro_batches": self.num_micro_batches, "consumed_samples": self.consumed_samples, "no_wd_decay_cond": self.no_wd_decay_cond, "scale_lr_cond": self.scale_lr_cond, "lr_mult": self.lr_mult, "megatron_dataset_flag": self.megatron_dataset_flag, "eval_iters": self.eval_iters, "eval_interval": self.eval_interval, } if self.recompute_activations: self.megatron_lm_default_args["recompute_granularity"] = "selective" if self.tensorboard_dir is not None: self.megatron_lm_default_args["tensorboard_dir"] = self.tensorboard_dir if self.set_all_logging_options: self.set_tensorboard_logging_options() if self.other_megatron_args is not None: self.megatron_lm_default_args.update(self.other_megatron_args) def set_network_size_args(self, model, batch_data=None): model_config_type = model.config.model_type.lower() for model_type in MODEL_CONFIGS_TO_MEGATRON_PARSERS.keys(): if model_type in model_config_type: MODEL_CONFIGS_TO_MEGATRON_PARSERS[model_type](self, model, batch_data) return raise ValueError( f"Accelerate Megatron-LM integration not supports {model_config_type} model. " "You can add your own model config parser." 
) def set_mixed_precision(self, mixed_precision): if mixed_precision == "fp16": self.megatron_lm_default_args["fp16"] = True elif mixed_precision == "bf16": self.megatron_lm_default_args["bf16"] = True self.DDP_impl = "local" self.megatron_lm_default_args["DDP_impl"] = self.DDP_impl def set_training_args(self, micro_batch_size, dp_degree): self.data_parallel_size = dp_degree self.micro_batch_size = micro_batch_size self.global_batch_size = dp_degree * micro_batch_size * self.num_micro_batches self.megatron_lm_default_args["data_parallel_size"] = self.data_parallel_size self.megatron_lm_default_args["micro_batch_size"] = self.micro_batch_size self.megatron_lm_default_args["global_batch_size"] = self.global_batch_size def set_optimizer_type(self, optimizer): optimizer_name = optimizer.__class__.__name__.lower() if "adam" in optimizer_name: self.megatron_lm_default_args["optimizer"] = "adam" self.megatron_lm_default_args["adam_beta1"] = optimizer.defaults["betas"][0] self.megatron_lm_default_args["adam_beta2"] = optimizer.defaults["betas"][1] self.megatron_lm_default_args["adam_eps"] = optimizer.defaults["eps"] elif "sgd" in optimizer_name: self.megatron_lm_default_args["optimizer"] = "sgd" self.megatron_lm_default_args["sgd_momentum"] = optimizer.defaults["momentum"] else: raise ValueError(f"Optimizer {optimizer_name} is not supported by Megatron-LM") self.megatron_lm_default_args["lr"] = optimizer.defaults["lr"] self.megatron_lm_default_args["weight_decay"] = optimizer.defaults["weight_decay"] def set_scheduler_args(self, scheduler): if self.train_iters is None: self.train_iters = scheduler.total_num_steps // self.megatron_lm_default_args["data_parallel_size"] if self.train_samples is not None: self.train_samples = None warnings.warn( "Ignoring `train_samples` as `train_iters` based on scheduler is being used for training." ) if self.lr_warmup_iters is None: self.lr_warmup_iters = scheduler.warmup_num_steps // self.megatron_lm_default_args["data_parallel_size"] if self.lr_warmup_samples is not None: warnings.warn( "Ignoring `lr_warmup_samples` as `lr_warmup_iters` based on scheduler is being used for training." 
) self.lr_warmup_samples = 0 self.megatron_lm_default_args["train_iters"] = self.train_iters self.megatron_lm_default_args["lr_warmup_iters"] = self.lr_warmup_iters self.megatron_lm_default_args["train_samples"] = self.train_samples self.megatron_lm_default_args["lr_warmup_samples"] = self.lr_warmup_samples self.megatron_lm_default_args["lr_decay_iters"] = self.lr_decay_iters self.megatron_lm_default_args["lr_decay_samples"] = self.lr_decay_samples self.megatron_lm_default_args["lr_warmup_fraction"] = self.lr_warmup_fraction self.megatron_lm_default_args["lr_decay_style"] = self.lr_decay_style self.megatron_lm_default_args["weight_decay_incr_style"] = self.weight_decay_incr_style self.megatron_lm_default_args["start_weight_decay"] = self.start_weight_decay self.megatron_lm_default_args["end_weight_decay"] = self.end_weight_decay self.megatron_lm_default_args["min_lr"] = self.min_lr def set_tensorboard_logging_options(self): from megatron.training.arguments import _add_logging_args parser = argparse.ArgumentParser() parser = _add_logging_args(parser) logging_args = parser.parse_known_args() self.dataset_args = vars(logging_args[0]) for key, value in self.dataset_args.items(): if key.startswith("log_"): self.megatron_lm_default_args[key] = True elif key.startswith("no_log_"): self.megatron_lm_default_args[key.replace("no_", "")] = True MODEL_CONFIGS_TO_MEGATRON_PARSERS = {} def add_model_config_to_megatron_parser(model_type: str): def add_model_config_parser_helper(func): @functools.wraps(func) def wrapper(*args, **kwargs): return func(*args, **kwargs) MODEL_CONFIGS_TO_MEGATRON_PARSERS[model_type] = func return wrapper return add_model_config_parser_helper @add_model_config_to_megatron_parser("megatron-bert") def parse_bert_config(megatron_lm_plugin, model, batch_data): model_type_name = "bert" num_layers = model.config.num_hidden_layers hidden_size = model.config.hidden_size num_attention_heads = model.config.num_attention_heads max_position_embeddings = model.config.max_position_embeddings num_labels = model.config.num_labels orig_vocab_size = model.config.vocab_size pretraining_flag = False if "maskedlm" in model.__class__.__name__.lower(): pretraining_flag = True if megatron_lm_plugin.seq_length is not None: if megatron_lm_plugin.encoder_seq_length is not None: warnings.warn("Both `seq_length` and `encoder_seq_length` are set. 
Using `encoder_seq_length`.") megatron_lm_plugin.seq_length = megatron_lm_plugin.encoder_seq_length elif megatron_lm_plugin.encoder_seq_length is not None: megatron_lm_plugin.seq_length = megatron_lm_plugin.encoder_seq_length elif batch_data is not None: megatron_lm_plugin.seq_length = batch_data["input_ids"].shape[1] else: megatron_lm_plugin.seq_length = max_position_embeddings megatron_lm_plugin.megatron_lm_default_args["seq_length"] = megatron_lm_plugin.seq_length megatron_lm_plugin.megatron_lm_default_args["model_type_name"] = model_type_name megatron_lm_plugin.megatron_lm_default_args["num_layers"] = num_layers megatron_lm_plugin.megatron_lm_default_args["hidden_size"] = hidden_size megatron_lm_plugin.megatron_lm_default_args["num_attention_heads"] = num_attention_heads megatron_lm_plugin.megatron_lm_default_args["max_position_embeddings"] = max_position_embeddings megatron_lm_plugin.megatron_lm_default_args["pretraining_flag"] = pretraining_flag megatron_lm_plugin.megatron_lm_default_args["orig_vocab_size"] = orig_vocab_size megatron_lm_plugin.megatron_lm_default_args["model_return_dict"] = model.config.return_dict megatron_lm_plugin.megatron_lm_default_args["num_labels"] = num_labels @add_model_config_to_megatron_parser("gpt2") def parse_gpt2_config(megatron_lm_plugin, model, batch_data): model_type_name = "gpt" num_layers = model.config.n_layer hidden_size = model.config.n_embd num_attention_heads = model.config.n_head max_position_embeddings = model.config.n_positions orig_vocab_size = model.config.vocab_size pretraining_flag = True if megatron_lm_plugin.seq_length is not None: if megatron_lm_plugin.decoder_seq_length is not None: warnings.warn("Both `seq_length` and `decoder_seq_length` are set. Using `decoder_seq_length`.") megatron_lm_plugin.seq_length = megatron_lm_plugin.decoder_seq_length elif megatron_lm_plugin.decoder_seq_length is not None: megatron_lm_plugin.seq_length = megatron_lm_plugin.decoder_seq_length elif batch_data is not None: megatron_lm_plugin.seq_length = batch_data["input_ids"].shape[1] else: megatron_lm_plugin.seq_length = max_position_embeddings megatron_lm_plugin.megatron_lm_default_args["seq_length"] = megatron_lm_plugin.seq_length megatron_lm_plugin.megatron_lm_default_args["return_logits"] = megatron_lm_plugin.return_logits megatron_lm_plugin.megatron_lm_default_args["tokenizer_type"] = "GPT2BPETokenizer" megatron_lm_plugin.megatron_lm_default_args["model_type_name"] = model_type_name megatron_lm_plugin.megatron_lm_default_args["num_layers"] = num_layers megatron_lm_plugin.megatron_lm_default_args["hidden_size"] = hidden_size megatron_lm_plugin.megatron_lm_default_args["num_attention_heads"] = num_attention_heads megatron_lm_plugin.megatron_lm_default_args["max_position_embeddings"] = max_position_embeddings megatron_lm_plugin.megatron_lm_default_args["pretraining_flag"] = pretraining_flag megatron_lm_plugin.megatron_lm_default_args["orig_vocab_size"] = orig_vocab_size megatron_lm_plugin.megatron_lm_default_args["model_return_dict"] = model.config.return_dict @add_model_config_to_megatron_parser("t5") def parse_t5_config(megatron_lm_plugin, model, batch_data): model_type_name = "t5" num_layers = model.config.num_layers hidden_size = model.config.d_model num_attention_heads = model.config.num_heads max_position_embeddings = model.config.n_positions if hasattr(model.config, "n_positions") else 1024 orig_vocab_size = model.config.vocab_size pretraining_flag = True if megatron_lm_plugin.encoder_seq_length is None: if batch_data is not None: 
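            # No encoder sequence length was configured: fall back to the first batch's input length.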
megatron_lm_plugin.encoder_seq_length = batch_data["input_ids"].shape[1]
        else:
            megatron_lm_plugin.encoder_seq_length = max_position_embeddings
    if megatron_lm_plugin.decoder_seq_length is None:
        if batch_data is not None:
            megatron_lm_plugin.decoder_seq_length = batch_data["labels"].shape[1]
        else:
            megatron_lm_plugin.decoder_seq_length = max_position_embeddings
    megatron_lm_plugin.megatron_lm_default_args["encoder_seq_length"] = megatron_lm_plugin.encoder_seq_length
    megatron_lm_plugin.megatron_lm_default_args["decoder_seq_length"] = megatron_lm_plugin.decoder_seq_length
    megatron_lm_plugin.megatron_lm_default_args["model_type_name"] = model_type_name
    megatron_lm_plugin.megatron_lm_default_args["num_layers"] = num_layers
    megatron_lm_plugin.megatron_lm_default_args["hidden_size"] = hidden_size
    megatron_lm_plugin.megatron_lm_default_args["num_attention_heads"] = num_attention_heads
    megatron_lm_plugin.megatron_lm_default_args["max_position_embeddings"] = max_position_embeddings
    megatron_lm_plugin.megatron_lm_default_args["pretraining_flag"] = pretraining_flag
    megatron_lm_plugin.megatron_lm_default_args["orig_vocab_size"] = orig_vocab_size
    megatron_lm_plugin.megatron_lm_default_args["model_return_dict"] = model.config.return_dict


@add_model_config_to_megatron_parser("llama")
def parse_llama_config(megatron_lm_plugin, model, batch_data):
    model_type_name = "gpt"
    num_layers = model.config.num_hidden_layers
    pretraining_flag = True
    hidden_size = model.config.hidden_size
    num_attention_heads = model.config.num_attention_heads
    orig_vocab_size = model.config.vocab_size
    max_position_embeddings = model.config.max_position_embeddings
    seq_length = getattr(model.config, "max_sequence_length", None)
    if megatron_lm_plugin.seq_length is None:
        if seq_length is not None:
            megatron_lm_plugin.seq_length = seq_length
        elif megatron_lm_plugin.decoder_seq_length is not None:
            megatron_lm_plugin.seq_length = megatron_lm_plugin.decoder_seq_length
        elif batch_data is not None:
            megatron_lm_plugin.seq_length = batch_data["input_ids"].shape[1]
        else:
            megatron_lm_plugin.seq_length = max_position_embeddings
    megatron_lm_plugin.megatron_lm_default_args["return_logits"] = megatron_lm_plugin.return_logits
    megatron_lm_plugin.megatron_lm_default_args["tokenizer_type"] = "Llama2Tokenizer"
    megatron_lm_plugin.megatron_lm_default_args["model_type_name"] = model_type_name
    megatron_lm_plugin.megatron_lm_default_args["num_layers"] = num_layers
    megatron_lm_plugin.megatron_lm_default_args["pretraining_flag"] = pretraining_flag
    megatron_lm_plugin.megatron_lm_default_args["hidden_size"] = hidden_size
    megatron_lm_plugin.megatron_lm_default_args["num_attention_heads"] = num_attention_heads
    megatron_lm_plugin.megatron_lm_default_args["orig_vocab_size"] = orig_vocab_size
    megatron_lm_plugin.megatron_lm_default_args["max_position_embeddings"] = max_position_embeddings
    megatron_lm_plugin.megatron_lm_default_args["seq_length"] = megatron_lm_plugin.seq_length
    megatron_lm_plugin.megatron_lm_default_args["model_return_dict"] = model.config.return_dict


@dataclass
class BnbQuantizationConfig:
    """
    A plugin to enable BitsAndBytes 4bit and 8bit quantization.

    Args:
        load_in_8bit (`bool`, defaults to `False`):
            Enable 8bit quantization.
        llm_int8_threshold (`float`, defaults to `6.0`):
            Value of the outlier threshold. Only relevant when `load_in_8bit=True`.
        load_in_4bit (`bool`, defaults to `False`):
            Enable 4bit quantization.
        bnb_4bit_quant_type (`str`, defaults to `fp4`):
            Set the quantization data type in the `bnb.nn.Linear4Bit` layers. Options are {'fp4','nf4'}.
        bnb_4bit_use_double_quant (`bool`, defaults to `False`):
            Enable nested quantization where the quantization constants from the first quantization are quantized
            again.
        bnb_4bit_compute_dtype (`str`, defaults to `fp16`):
            This sets the computational type, which might be different than the input type. For example, inputs might
            be fp32, but computation can be set to bf16 for speedups. Options are {'fp32','fp16','bf16'}.
        torch_dtype (`torch.dtype`, defaults to `None`):
            This sets the dtype of the remaining non-quantized layers. The `bitsandbytes` library suggests setting the
            value to `torch.float16` for 8-bit models and using the same dtype as the compute dtype for 4-bit models.
        skip_modules (`List[str]`, defaults to `None`):
            An explicit list of the modules that we don't quantize. The dtype of these modules will be `torch_dtype`.
        keep_in_fp32_modules (`List`, defaults to `None`):
            An explicit list of the modules that we don't quantize. We keep them in `torch.float32`.
    """

    load_in_8bit: bool = field(default=False, metadata={"help": "enable 8bit quantization."})
    llm_int8_threshold: float = field(
        default=6.0, metadata={"help": "value of the outlier threshold. only relevant when load_in_8bit=True"}
    )
    load_in_4bit: bool = field(default=False, metadata={"help": "enable 4bit quantization."})
    bnb_4bit_quant_type: str = field(
        default="fp4",
        metadata={
            "help": "set the quantization data type in the `bnb.nn.Linear4Bit` layers. Options are {'fp4','nf4'}."
        },
    )
    bnb_4bit_use_double_quant: bool = field(
        default=False,
        metadata={
            "help": "enable nested quantization where the quantization constants from the first quantization are quantized again."
        },
    )
    bnb_4bit_compute_dtype: str = field(
        default="fp16",
        metadata={
            "help": "This sets the computational type, which might be different than the input type. For example, inputs might be "
            "fp32, but computation can be set to bf16 for speedups. Options are {'fp32','fp16','bf16'}."
        },
    )
    torch_dtype: torch.dtype = field(
        default=None,
        metadata={
            "help": "this sets the dtype of the remaining non-quantized layers. The `bitsandbytes` library suggests setting the value "
            "to `torch.float16` for 8-bit models and using the same dtype as the compute dtype for 4-bit models."
        },
    )
    skip_modules: List[str] = field(
        default=None,
        metadata={
            "help": "an explicit list of the modules that we don't quantize. The dtype of these modules will be `torch_dtype`."
        },
    )
    keep_in_fp32_modules: List[str] = field(
        default=None,
        metadata={"help": "an explicit list of the modules that we don't quantize. We keep them in `torch.float32`."},
    )

    def __post_init__(self):
        """
        Safety checker that arguments are correct - also replaces some NoneType arguments with their default values.
""" if not isinstance(self.load_in_8bit, bool): raise ValueError("load_in_8bit must be a boolean") if not isinstance(self.load_in_4bit, bool): raise ValueError("load_in_4bit must be a boolean") if self.load_in_4bit and self.load_in_8bit: raise ValueError("load_in_4bit and load_in_8bit can't be both True") if not self.load_in_4bit and not self.load_in_8bit: raise ValueError("load_in_4bit and load_in_8bit can't be both False") if not isinstance(self.llm_int8_threshold, (int, float)): raise ValueError("llm_int8_threshold must be a float or an int") if not isinstance(self.bnb_4bit_quant_type, str): raise ValueError("bnb_4bit_quant_type must be a string") elif self.bnb_4bit_quant_type not in ["fp4", "nf4"]: raise ValueError(f"bnb_4bit_quant_type must be in ['fp4','nf4'] but found {self.bnb_4bit_quant_type}") if not isinstance(self.bnb_4bit_use_double_quant, bool): raise ValueError("bnb_4bit_use_double_quant must be a boolean") if isinstance(self.bnb_4bit_compute_dtype, str): if self.bnb_4bit_compute_dtype == "fp32": self.bnb_4bit_compute_dtype = torch.float32 elif self.bnb_4bit_compute_dtype == "fp16": self.bnb_4bit_compute_dtype = torch.float16 elif self.bnb_4bit_compute_dtype == "bf16": self.bnb_4bit_compute_dtype = torch.bfloat16 else: raise ValueError( f"bnb_4bit_compute_dtype must be in ['fp32','fp16','bf16'] but found {self.bnb_4bit_compute_dtype}" ) elif not isinstance(self.bnb_4bit_compute_dtype, torch.dtype): raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype") if self.skip_modules is not None and not isinstance(self.skip_modules, list): raise ValueError("skip_modules must be a list of strings") if self.keep_in_fp32_modules is not None and not isinstance(self.keep_in_fp32_modules, list): raise ValueError("keep_in_fp_32_modules must be a list of strings") if self.load_in_4bit: self.target_dtype = CustomDtype.INT4 if self.load_in_8bit: self.target_dtype = torch.int8 if self.load_in_4bit and self.llm_int8_threshold != 6.0: warnings.warn("llm_int8_threshold can only be used for model loaded in 8bit") if isinstance(self.torch_dtype, str): if self.torch_dtype == "fp32": self.torch_dtype = torch.float32 elif self.torch_dtype == "fp16": self.torch_dtype = torch.float16 elif self.torch_dtype == "bf16": self.torch_dtype = torch.bfloat16 else: raise ValueError(f"torch_dtype must be in ['fp32','fp16','bf16'] but found {self.torch_dtype}") if self.load_in_8bit and self.torch_dtype is None: self.torch_dtype = torch.float16 if self.load_in_4bit and self.torch_dtype is None: self.torch_dtype = self.bnb_4bit_compute_dtype if not isinstance(self.torch_dtype, torch.dtype): raise ValueError("torch_dtype must be a torch.dtype") def get_module_class_from_name(module, name): """ Gets a class from a module by its name. Args: module (`torch.nn.Module`): The module to get the class from. name (`str`): The name of the class. """ modules_children = list(module.children()) if module.__class__.__name__ == name: return module.__class__ elif len(modules_children) == 0: return else: for child_module in modules_children: module_class = get_module_class_from_name(child_module, name) if module_class is not None: return module_class
accelerate/src/accelerate/utils/dataclasses.py/0
{ "file_path": "accelerate/src/accelerate/utils/dataclasses.py", "repo_id": "accelerate", "token_count": 50957 }
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from types import MethodType import torch.nn as nn from .imports import is_fp8_available from .operations import GatheredParameters # Do not import `transformer_engine` at package level to avoid potential issues def convert_model(model, to_transformer_engine=True, _convert_linear=True, _convert_ln=True): """ Recursively converts the linear and layernorm layers of a model to their `transformers_engine` counterpart. """ if not is_fp8_available(): raise ImportError("Using `convert_model` requires transformer_engine to be installed.") import transformer_engine.pytorch as te for name, module in model.named_children(): if isinstance(module, nn.Linear) and to_transformer_engine and _convert_linear: has_bias = module.bias is not None params_to_gather = [module.weight] if has_bias: params_to_gather.append(module.bias) with GatheredParameters(params_to_gather, modifier_rank=0): if any(p % 16 != 0 for p in module.weight.shape): return te_module = te.Linear( module.in_features, module.out_features, bias=has_bias, params_dtype=module.weight.dtype ) te_module.weight.copy_(module.weight) if has_bias: te_module.bias.copy_(module.bias) setattr(model, name, te_module) # Note: @xrsrke (Phuc) found that te.LayerNorm doesn't have any real memory savings or speedups over nn.LayerNorm elif isinstance(module, nn.LayerNorm) and to_transformer_engine and _convert_ln: with GatheredParameters([module.weight, module.bias], modifier_rank=0): te_module = te.LayerNorm(module.normalized_shape[0], eps=module.eps, params_dtype=module.weight.dtype) te_module.weight.copy_(module.weight) te_module.bias.copy_(module.bias) setattr(model, name, te_module) elif isinstance(module, te.Linear) and not to_transformer_engine and _convert_linear: has_bias = module.bias is not None new_module = nn.Linear( module.in_features, module.out_features, bias=has_bias, params_dtype=module.weight.dtype ) new_module.weight.copy_(module.weight) if has_bias: new_module.bias.copy_(module.bias) setattr(model, name, new_module) elif isinstance(module, te.LayerNorm) and not to_transformer_engine and _convert_ln: new_module = nn.LayerNorm(module.normalized_shape[0], eps=module.eps, params_dtype=module.weight.dtype) new_module.weight.copy_(module.weight) new_module.bias.copy_(module.bias) setattr(model, name, new_module) else: convert_model( module, to_transformer_engine=to_transformer_engine, _convert_linear=_convert_linear, _convert_ln=_convert_ln, ) def has_transformer_engine_layers(model): """ Returns whether a given model has some `transformer_engine` layer or not. 
""" if not is_fp8_available(): raise ImportError("Using `has_transformer_engine_layers` requires transformer_engine to be installed.") import transformer_engine.pytorch as te for m in model.modules(): if isinstance(m, (te.LayerNorm, te.Linear, te.TransformerLayer)): return True return False def contextual_fp8_autocast(model_forward, fp8_recipe, use_during_eval=False): """ Wrapper for a model's forward method to apply FP8 autocast. Is context aware, meaning that by default it will disable FP8 autocast during eval mode, which is generally better for more accurate metrics. """ if not is_fp8_available(): raise ImportError("Using `contextual_fp8_autocast` requires transformer_engine to be installed.") from transformer_engine.pytorch import fp8_autocast def forward(self, *args, **kwargs): enabled = use_during_eval or self.training with fp8_autocast(enabled=enabled, fp8_recipe=fp8_recipe): return model_forward(*args, **kwargs) # To act like a decorator so that it can be popped when doing `extract_model_from_parallel` forward.__wrapped__ = model_forward return forward def apply_fp8_autowrap(model, fp8_recipe_handler): """ Applies FP8 context manager to the model's forward method """ if not is_fp8_available(): raise ImportError("Using `apply_fp8_autowrap` requires transformer_engine to be installed.") import transformer_engine.common.recipe as te_recipe kwargs = fp8_recipe_handler.to_kwargs() if fp8_recipe_handler is not None else {} if "fp8_format" in kwargs: kwargs["fp8_format"] = getattr(te_recipe.Format, kwargs["fp8_format"]) use_during_eval = kwargs.pop("use_autocast_during_eval", False) fp8_recipe = te_recipe.DelayedScaling(**kwargs) new_forward = contextual_fp8_autocast(model.forward, fp8_recipe, use_during_eval) if hasattr(model.forward, "__func__"): model.forward = MethodType(new_forward, model) else: model.forward = new_forward return model
accelerate/src/accelerate/utils/transformer_engine.py/0
{ "file_path": "accelerate/src/accelerate/utils/transformer_engine.py", "repo_id": "accelerate", "token_count": 2373 }
compute_environment: LOCAL_MACHINE debug: false distributed_type: MULTI_GPU downcast_bf16: 'no' enable_cpu_affinity: false fp8_config: amax_compute_algorithm: max amax_history_length: 1024 backend: TE fp8_format: E4M3 interval: 1 margin: 0 override_linear_precision: false use_autocast_during_eval: false gpu_ids: all machine_rank: 0 main_training_function: main mixed_precision: fp8 num_machines: 1 num_processes: 2 rdzv_backend: static same_network: true tpu_env: [] tpu_use_cluster: false tpu_use_sudo: false use_cpu: false
accelerate/tests/test_configs/0_34_0_fp8.yaml/0
{ "file_path": "accelerate/tests/test_configs/0_34_0_fp8.yaml", "repo_id": "accelerate", "token_count": 216 }
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import unittest from tempfile import TemporaryDirectory import torch import torch.nn as nn from accelerate.utils import ( OffloadedWeightsLoader, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, ) class ModelForTest(nn.Module): def __init__(self): super().__init__() self.linear1 = nn.Linear(3, 4) self.batchnorm = nn.BatchNorm1d(4) self.linear2 = nn.Linear(4, 5) def forward(self, x): return self.linear2(self.batchnorm(self.linear1(x))) class OffloadTester(unittest.TestCase): def test_offload_state_dict(self): model = ModelForTest() with TemporaryDirectory() as tmp_dir: offload_state_dict(tmp_dir, model.state_dict()) index_file = os.path.join(tmp_dir, "index.json") assert os.path.isfile(index_file) # TODO: add tests on what is inside the index for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]: weight_file = os.path.join(tmp_dir, f"{key}.dat") assert os.path.isfile(weight_file) # TODO: add tests on the fact weights are properly loaded def test_offload_weight(self): dtypes = [torch.float16, torch.float32, torch.bfloat16] for dtype in dtypes: weight = torch.randn(2, 3, dtype=dtype) with TemporaryDirectory() as tmp_dir: index = offload_weight(weight, "weight", tmp_dir, {}) weight_file = os.path.join(tmp_dir, "weight.dat") assert os.path.isfile(weight_file) assert index == {"weight": {"shape": [2, 3], "dtype": str(dtype).split(".")[1]}} new_weight = load_offloaded_weight(weight_file, index["weight"]) assert torch.equal(weight, new_weight) def test_offload_weights_loader(self): model = ModelForTest() state_dict = model.state_dict() cpu_part = {k: v for k, v in state_dict.items() if "linear2" not in k} disk_part = {k: v for k, v in state_dict.items() if "linear2" in k} with TemporaryDirectory() as tmp_dir: offload_state_dict(tmp_dir, disk_part) weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir) # Every key is there with the right value assert sorted(weight_map) == sorted(state_dict.keys()) for key, param in state_dict.items(): assert torch.allclose(param, weight_map[key]) cpu_part = {k: v for k, v in state_dict.items() if "weight" in k} disk_part = {k: v for k, v in state_dict.items() if "weight" not in k} with TemporaryDirectory() as tmp_dir: offload_state_dict(tmp_dir, disk_part) weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir) # Every key is there with the right value assert sorted(weight_map) == sorted(state_dict.keys()) for key, param in state_dict.items(): assert torch.allclose(param, weight_map[key]) with TemporaryDirectory() as tmp_dir: offload_state_dict(tmp_dir, state_dict) # Duplicates are removed weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir) # Every key is there with the right value assert sorted(weight_map) == sorted(state_dict.keys()) for key, param in state_dict.items(): assert torch.allclose(param, weight_map[key]) def 
test_extract_submodules_state_dict(self): state_dict = {"a.1": 0, "a.10": 1, "a.2": 2} extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"]) assert extracted == {"a.1": 0, "a.2": 2} state_dict = {"a.1.a": 0, "a.10.a": 1, "a.2.a": 2} extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"]) assert extracted == {"a.1.a": 0, "a.2.a": 2}
accelerate/tests/test_offload.py/0
{ "file_path": "accelerate/tests/test_offload.py", "repo_id": "accelerate", "token_count": 1981 }
use crate::benchmarks::{BenchDevice, BenchDeviceHandler}; use candle_core::{DType, Device, Tensor}; use criterion::{black_box, criterion_group, Criterion, Throughput}; use std::time::Instant; fn run(a: &Tensor) { a.affine(12.34, 56.78).unwrap(); } fn run_affine_benchmark(c: &mut Criterion, device: &Device, dtype: DType, name: &str) { let b = 1; let m = 1024; let k = 1024; let tensor = Tensor::zeros((b, m, k), dtype, device).unwrap(); let flops = b * m * k * dtype.size_in_bytes(); let mut group = c.benchmark_group(device.bench_name(name)); group.throughput(Throughput::Bytes(flops as u64)); group.bench_function("iter", move |b| { b.iter_custom(|iters| { let start = Instant::now(); for _i in 0..iters { run(black_box(&tensor)); } device.sync().unwrap(); start.elapsed() }) }); group.finish(); } fn criterion_benchmark(c: &mut Criterion) { let handler = BenchDeviceHandler::new().unwrap(); for device in handler.devices { run_affine_benchmark(c, &device, DType::F32, "affine_f32"); run_affine_benchmark(c, &device, DType::F16, "affine_f16"); run_affine_benchmark(c, &device, DType::BF16, "affine_bf16"); } } criterion_group!(benches, criterion_benchmark);
candle/candle-core/benches/benchmarks/affine.rs/0
{ "file_path": "candle/candle-core/benches/benchmarks/affine.rs", "repo_id": "candle", "token_count": 590 }
//! 1D and 2D Convolutions //! use crate::{op::BackpropOp, op::Op, Error, Result, Tensor}; #[derive(Debug, Clone, PartialEq, Eq)] pub struct ParamsConv1D { pub(crate) b_size: usize, // Maybe we should have a version without l_in as this bit depends on the input and not only on // the weights. pub(crate) l_in: usize, pub(crate) c_out: usize, pub(crate) c_in: usize, pub(crate) k_size: usize, pub(crate) padding: usize, pub(crate) stride: usize, pub(crate) dilation: usize, } impl ParamsConv1D { pub(crate) fn l_out(&self) -> usize { (self.l_in + 2 * self.padding - self.dilation * (self.k_size - 1) - 1) / self.stride + 1 } pub(crate) fn out_dims(&self) -> Vec<usize> { let l_out = self.l_out(); vec![self.b_size, self.c_out, l_out] } } #[derive(Debug, Clone, PartialEq, Eq)] pub struct ParamsConvTranspose1D { pub(crate) b_size: usize, pub(crate) l_in: usize, pub(crate) c_out: usize, pub(crate) c_in: usize, pub(crate) k_size: usize, pub(crate) padding: usize, pub(crate) output_padding: usize, pub(crate) stride: usize, pub(crate) dilation: usize, } impl ParamsConvTranspose1D { pub(crate) fn l_out(&self) -> usize { (self.l_in - 1) * self.stride - 2 * self.padding + self.dilation * (self.k_size - 1) + self.output_padding + 1 } pub(crate) fn out_dims(&self) -> Vec<usize> { let l_out = self.l_out(); vec![self.b_size, self.c_out, l_out] } } #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub enum CudnnFwdAlgo { ImplicitGemm, ImplicitPrecompGemm, Gemm, Direct, Fft, FftTiling, Winograd, WinogradNonFused, Count, } #[derive(Debug, Clone, PartialEq, Eq)] pub struct ParamsConv2D { pub(crate) b_size: usize, pub(crate) i_h: usize, pub(crate) i_w: usize, pub(crate) k_h: usize, pub(crate) k_w: usize, pub(crate) c_out: usize, pub(crate) c_in: usize, pub(crate) padding: usize, pub(crate) stride: usize, pub(crate) dilation: usize, pub cudnn_fwd_algo: Option<CudnnFwdAlgo>, } impl ParamsConv2D { pub(crate) fn out_h(&self) -> usize { (self.i_h + 2 * self.padding - self.dilation * (self.k_h - 1) - 1) / self.stride + 1 } pub(crate) fn out_w(&self) -> usize { (self.i_w + 2 * self.padding - self.dilation * (self.k_w - 1) - 1) / self.stride + 1 } pub(crate) fn out_dims(&self) -> Vec<usize> { vec![self.b_size, self.c_out, self.out_h(), self.out_w()] } } #[derive(Debug, Clone, PartialEq, Eq)] pub struct ParamsConvTranspose2D { pub(crate) b_size: usize, pub(crate) i_h: usize, pub(crate) i_w: usize, pub(crate) k_h: usize, pub(crate) k_w: usize, pub(crate) c_out: usize, pub(crate) c_in: usize, pub(crate) padding: usize, pub(crate) output_padding: usize, pub(crate) stride: usize, pub(crate) dilation: usize, } impl ParamsConvTranspose2D { pub(crate) fn out_h(&self) -> usize { (self.i_h - 1) * self.stride + self.dilation * (self.k_h - 1) + self.output_padding + 1 - 2 * self.padding } pub(crate) fn out_w(&self) -> usize { (self.i_w - 1) * self.stride + self.dilation * (self.k_w - 1) + self.output_padding + 1 - 2 * self.padding } pub(crate) fn out_dims(&self) -> Vec<usize> { vec![self.b_size, self.c_out, self.out_h(), self.out_w()] } } impl Tensor { fn conv1d_single_group(&self, kernel: &Self, params: &ParamsConv1D) -> Result<Self> { let storage = self.storage() .conv1d(self.layout(), &kernel.storage(), kernel.layout(), params)?; let op = BackpropOp::new2(self, kernel, |arg, kernel| Op::Conv1D { arg, kernel, padding: params.padding, stride: params.stride, dilation: params.dilation, }); let out_dims = params.out_dims(); Ok(crate::tensor::from_storage(storage, out_dims, op, false)) } /// Applies a 1D convolution over the input tensor. 
pub fn conv1d( &self, kernel: &Self, padding: usize, stride: usize, dilation: usize, groups: usize, ) -> Result<Self> { let (c_out, c_in_k, k_size) = kernel.dims3()?; let (b_size, c_in, l_in) = self.dims3()?; if c_in != c_in_k * groups { Err(Error::Conv1dInvalidArgs { inp_shape: self.shape().clone(), k_shape: kernel.shape().clone(), padding, stride, msg: "the number of in-channels on the input doesn't match the kernel size", } .bt())? } let params = ParamsConv1D { b_size, l_in, c_out: c_out / groups, c_in: c_in / groups, k_size, padding, stride, dilation, }; if groups == 1 { self.conv1d_single_group(kernel, &params) } else { let blocks = self.chunk(groups, 1)?; let kernel = kernel.chunk(groups, 0)?; let blocks = blocks .iter() .zip(&kernel) .map(|(block, kernel)| block.conv1d_single_group(kernel, &params)) .collect::<Result<Vec<_>>>()?; Tensor::cat(&blocks, 1) } } fn conv_transpose1d_single_group( &self, kernel: &Self, params: &ParamsConvTranspose1D, ) -> Result<Self> { let storage = self.storage().conv_transpose1d( self.layout(), &kernel.storage(), kernel.layout(), params, )?; let op = BackpropOp::new2(self, kernel, |arg, kernel| Op::ConvTranspose1D { arg, kernel, padding: params.padding, output_padding: params.output_padding, stride: params.stride, dilation: params.dilation, }); let out_dims = params.out_dims(); Ok(crate::tensor::from_storage(storage, out_dims, op, false)) } /// Applies a 1D transposed convolution over the input tensor. pub fn conv_transpose1d( &self, kernel: &Self, padding: usize, output_padding: usize, stride: usize, dilation: usize, groups: usize, ) -> Result<Self> { let (c_in_k, c_out, k_size) = kernel.dims3()?; let (b_size, c_in, l_in) = self.dims3()?; if c_in != c_in_k { crate::bail!("in_channel mismatch between input ({c_in}) and kernel ({c_in_k})") } if c_in % groups != 0 { crate::bail!("in_channel {c_in} is not divisible by the number of groups") } let params = ParamsConvTranspose1D { b_size, l_in, k_size, c_out, c_in: c_in / groups, padding, output_padding, stride, dilation, }; if groups == 1 { self.conv_transpose1d_single_group(kernel, &params) } else { let blocks = self.chunk(groups, 1)?; let kernel = kernel.chunk(groups, 0)?; let blocks = blocks .iter() .zip(&kernel) .map(|(block, kernel)| block.conv_transpose1d_single_group(kernel, &params)) .collect::<Result<Vec<_>>>()?; Tensor::cat(&blocks, 1) } } fn conv2d_single_group(&self, kernel: &Self, params: &ParamsConv2D) -> Result<Self> { let storage = self.storage() .conv2d(self.layout(), &kernel.storage(), kernel.layout(), params)?; let op = BackpropOp::new2(self, kernel, |arg, kernel| Op::Conv2D { arg, kernel, padding: params.padding, stride: params.stride, dilation: params.dilation, }); let out_dims = params.out_dims(); Ok(crate::tensor::from_storage(storage, out_dims, op, false)) } /// Applies a 2D convolution over the input tensor. 
pub fn conv2d( &self, kernel: &Self, padding: usize, stride: usize, dilation: usize, groups: usize, ) -> Result<Self> { let (b_size, c_in, i_h, i_w) = self.dims4()?; let (c_out, c_in_k, k_h, k_w) = kernel.dims4()?; if c_in != c_in_k * groups { crate::bail!( "in_channel mismatch between input ({c_in}, groups {groups}) and kernel ({c_in_k})" ) } let params = ParamsConv2D { b_size, i_h, i_w, k_h, k_w, c_out: c_out / groups, c_in: c_in / groups, padding, stride, dilation, cudnn_fwd_algo: None, }; if groups == 1 { self.conv2d_single_group(kernel, &params) } else { let blocks = self.chunk(groups, 1)?; let kernel = kernel.chunk(groups, 0)?; let blocks = blocks .iter() .zip(&kernel) .map(|(block, kernel)| block.conv2d_single_group(kernel, &params)) .collect::<Result<Vec<_>>>()?; Tensor::cat(&blocks, 1) } } /// Applies a 2D transposed convolution over the input tensor. pub fn conv_transpose2d( &self, kernel: &Self, padding: usize, output_padding: usize, stride: usize, dilation: usize, ) -> Result<Self> { let (b_size, c_in, i_h, i_w) = self.dims4()?; let (c_in_k, c_out, k_h, k_w) = kernel.dims4()?; if c_in != c_in_k { crate::bail!("in_channel mismatch between input ({c_in}) and kernel ({c_in_k})") } let params = ParamsConvTranspose2D { b_size, i_h, i_w, k_h, k_w, c_out, c_in, padding, output_padding, stride, dilation, }; let storage = self.storage().conv_transpose2d( self.layout(), &kernel.storage(), kernel.layout(), &params, )?; let op = BackpropOp::new2(self, kernel, |arg, kernel| Op::ConvTranspose2D { arg, kernel, padding: params.padding, output_padding: params.output_padding, stride: params.stride, dilation: params.dilation, }); let out_dims = params.out_dims(); Ok(crate::tensor::from_storage(storage, out_dims, op, false)) } }
candle/candle-core/src/conv.rs/0
{ "file_path": "candle/candle-core/src/conv.rs", "repo_id": "candle", "token_count": 5821 }
use crate::backend::BackendDevice; use crate::cpu_backend::CpuDevice; use crate::{CpuStorage, DType, Result, Shape, Storage, WithDType}; /// A `DeviceLocation` represents a physical device whereas multiple `Device` /// can live on the same location (typically for cuda devices). #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] pub enum DeviceLocation { Cpu, Cuda { gpu_id: usize }, Metal { gpu_id: usize }, } /// Cpu, Cuda, or Metal #[derive(Debug, Clone)] pub enum Device { Cpu, Cuda(crate::CudaDevice), Metal(crate::MetalDevice), } pub trait NdArray { fn shape(&self) -> Result<Shape>; fn to_cpu_storage(&self) -> CpuStorage; } impl<S: WithDType> NdArray for S { fn shape(&self) -> Result<Shape> { Ok(Shape::from(())) } fn to_cpu_storage(&self) -> CpuStorage { S::to_cpu_storage(&[*self]) } } impl<S: WithDType, const N: usize> NdArray for &[S; N] { fn shape(&self) -> Result<Shape> { Ok(Shape::from(self.len())) } fn to_cpu_storage(&self) -> CpuStorage { S::to_cpu_storage(self.as_slice()) } } impl<S: WithDType> NdArray for &[S] { fn shape(&self) -> Result<Shape> { Ok(Shape::from(self.len())) } fn to_cpu_storage(&self) -> CpuStorage { S::to_cpu_storage(self) } } impl<S: WithDType, const N: usize, const M: usize> NdArray for &[[S; N]; M] { fn shape(&self) -> Result<Shape> { Ok(Shape::from((M, N))) } fn to_cpu_storage(&self) -> CpuStorage { S::to_cpu_storage_owned(self.concat()) } } impl<S: WithDType, const N1: usize, const N2: usize, const N3: usize> NdArray for &[[[S; N3]; N2]; N1] { fn shape(&self) -> Result<Shape> { Ok(Shape::from((N1, N2, N3))) } fn to_cpu_storage(&self) -> CpuStorage { let mut vec = Vec::with_capacity(N1 * N2 * N3); for i1 in 0..N1 { for i2 in 0..N2 { vec.extend(self[i1][i2]) } } S::to_cpu_storage_owned(vec) } } impl<S: WithDType, const N1: usize, const N2: usize, const N3: usize, const N4: usize> NdArray for &[[[[S; N4]; N3]; N2]; N1] { fn shape(&self) -> Result<Shape> { Ok(Shape::from((N1, N2, N3, N4))) } fn to_cpu_storage(&self) -> CpuStorage { let mut vec = Vec::with_capacity(N1 * N2 * N3 * N4); for i1 in 0..N1 { for i2 in 0..N2 { for i3 in 0..N3 { vec.extend(self[i1][i2][i3]) } } } S::to_cpu_storage_owned(vec) } } impl<S: NdArray> NdArray for Vec<S> { fn shape(&self) -> Result<Shape> { if self.is_empty() { crate::bail!("empty array") } let shape0 = self[0].shape()?; let n = self.len(); for v in self.iter() { let shape = v.shape()?; if shape != shape0 { crate::bail!("two elements have different shapes {shape:?} {shape0:?}") } } Ok(Shape::from([[n].as_slice(), shape0.dims()].concat())) } fn to_cpu_storage(&self) -> CpuStorage { // This allocates intermediary memory and shouldn't be necessary. 
let storages = self.iter().map(|v| v.to_cpu_storage()).collect::<Vec<_>>(); CpuStorage::concat(storages.as_slice()).unwrap() } } impl Device { pub fn new_cuda(ordinal: usize) -> Result<Self> { Ok(Self::Cuda(crate::CudaDevice::new(ordinal)?)) } pub fn as_cuda_device(&self) -> Result<&crate::CudaDevice> { match self { Self::Cuda(d) => Ok(d), Self::Cpu => crate::bail!("expected a cuda device, got cpu"), Self::Metal(_) => crate::bail!("expected a cuda device, got Metal"), } } pub fn as_metal_device(&self) -> Result<&crate::MetalDevice> { match self { Self::Cuda(_) => crate::bail!("expected a metal device, got cuda"), Self::Cpu => crate::bail!("expected a metal device, got cpu"), Self::Metal(d) => Ok(d), } } pub fn new_cuda_with_stream(ordinal: usize) -> Result<Self> { Ok(Self::Cuda(crate::CudaDevice::new_with_stream(ordinal)?)) } pub fn new_metal(ordinal: usize) -> Result<Self> { Ok(Self::Metal(crate::MetalDevice::new(ordinal)?)) } pub fn set_seed(&self, seed: u64) -> Result<()> { match self { Self::Cpu => CpuDevice.set_seed(seed), Self::Cuda(c) => c.set_seed(seed), Self::Metal(m) => m.set_seed(seed), } } pub fn same_device(&self, rhs: &Self) -> bool { match (self, rhs) { (Self::Cpu, Self::Cpu) => true, (Self::Cuda(lhs), Self::Cuda(rhs)) => lhs.same_device(rhs), (Self::Metal(lhs), Self::Metal(rhs)) => lhs.same_device(rhs), _ => false, } } pub fn location(&self) -> DeviceLocation { match self { Self::Cpu => DeviceLocation::Cpu, Self::Cuda(device) => device.location(), Device::Metal(device) => device.location(), } } pub fn is_cpu(&self) -> bool { matches!(self, Self::Cpu) } pub fn is_cuda(&self) -> bool { matches!(self, Self::Cuda(_)) } pub fn is_metal(&self) -> bool { matches!(self, Self::Metal(_)) } pub fn supports_bf16(&self) -> bool { match self { Self::Cuda(_) | Self::Metal(_) => true, Self::Cpu => false, } } /// Return `BF16` for devices that support it, otherwise default to `F32`. pub fn bf16_default_to_f32(&self) -> DType { if self.supports_bf16() { DType::BF16 } else { DType::F32 } } pub fn cuda_if_available(ordinal: usize) -> Result<Self> { if crate::utils::cuda_is_available() { Self::new_cuda(ordinal) } else { Ok(Self::Cpu) } } pub(crate) fn rand_uniform_f64( &self, lo: f64, up: f64, shape: &Shape, dtype: DType, ) -> Result<Storage> { match self { Device::Cpu => { let storage = CpuDevice.rand_uniform(shape, dtype, lo, up)?; Ok(Storage::Cpu(storage)) } Device::Cuda(device) => { // TODO: Remove the special case if we start supporting generating f16/bf16 directly. if dtype == DType::F16 || dtype == DType::BF16 { let storage = device.rand_uniform(shape, DType::F32, lo, up)?; Storage::Cuda(storage).to_dtype(&crate::Layout::contiguous(shape), dtype) } else { let storage = device.rand_uniform(shape, dtype, lo, up)?; Ok(Storage::Cuda(storage)) } } Device::Metal(device) => { let storage = device.rand_uniform(shape, dtype, lo, up)?; Ok(Storage::Metal(storage)) } } } pub(crate) fn rand_uniform<T: crate::FloatDType>( &self, lo: T, up: T, shape: &Shape, ) -> Result<Storage> { self.rand_uniform_f64(lo.to_f64(), up.to_f64(), shape, T::DTYPE) } pub(crate) fn rand_normal_f64( &self, mean: f64, std: f64, shape: &Shape, dtype: DType, ) -> Result<Storage> { match self { Device::Cpu => { let storage = CpuDevice.rand_normal(shape, dtype, mean, std)?; Ok(Storage::Cpu(storage)) } Device::Cuda(device) => { // TODO: Remove the special case if we start supporting generating f16/bf16 directly. 
if dtype == DType::F16 || dtype == DType::BF16 { let storage = device.rand_normal(shape, DType::F32, mean, std)?; Storage::Cuda(storage).to_dtype(&crate::Layout::contiguous(shape), dtype) } else { let storage = device.rand_normal(shape, dtype, mean, std)?; Ok(Storage::Cuda(storage)) } } Device::Metal(device) => { let storage = device.rand_normal(shape, dtype, mean, std)?; Ok(Storage::Metal(storage)) } } } pub(crate) fn rand_normal<T: crate::FloatDType>( &self, mean: T, std: T, shape: &Shape, ) -> Result<Storage> { self.rand_normal_f64(mean.to_f64(), std.to_f64(), shape, T::DTYPE) } pub(crate) fn ones(&self, shape: &Shape, dtype: DType) -> Result<Storage> { match self { Device::Cpu => { let storage = CpuDevice.ones_impl(shape, dtype)?; Ok(Storage::Cpu(storage)) } Device::Cuda(device) => { let storage = device.ones_impl(shape, dtype)?; Ok(Storage::Cuda(storage)) } Device::Metal(device) => { let storage = device.ones_impl(shape, dtype)?; Ok(Storage::Metal(storage)) } } } pub(crate) fn zeros(&self, shape: &Shape, dtype: DType) -> Result<Storage> { match self { Device::Cpu => { let storage = CpuDevice.zeros_impl(shape, dtype)?; Ok(Storage::Cpu(storage)) } Device::Cuda(device) => { let storage = device.zeros_impl(shape, dtype)?; Ok(Storage::Cuda(storage)) } Device::Metal(device) => { let storage = device.zeros_impl(shape, dtype)?; Ok(Storage::Metal(storage)) } } } pub(crate) unsafe fn alloc_uninit(&self, shape: &Shape, dtype: DType) -> Result<Storage> { match self { Device::Cpu => { let storage = CpuDevice.alloc_uninit(shape, dtype)?; Ok(Storage::Cpu(storage)) } Device::Cuda(device) => { let storage = device.alloc_uninit(shape, dtype)?; Ok(Storage::Cuda(storage)) } Device::Metal(device) => { let storage = device.alloc_uninit(shape, dtype)?; Ok(Storage::Metal(storage)) } } } pub(crate) fn storage_from_slice<D: WithDType>(&self, data: &[D]) -> Result<Storage> { match self { Device::Cpu => Ok(Storage::Cpu(data.to_cpu_storage())), Device::Cuda(device) => { let storage = device.storage_from_slice(data)?; Ok(Storage::Cuda(storage)) } Device::Metal(device) => { let storage = device.storage_from_slice(data)?; Ok(Storage::Metal(storage)) } } } pub(crate) fn storage<A: NdArray>(&self, array: A) -> Result<Storage> { match self { Device::Cpu => Ok(Storage::Cpu(array.to_cpu_storage())), Device::Cuda(device) => { let storage = array.to_cpu_storage(); let storage = device.storage_from_cpu_storage_owned(storage)?; Ok(Storage::Cuda(storage)) } Device::Metal(device) => { let storage = array.to_cpu_storage(); let storage = device.storage_from_cpu_storage_owned(storage)?; Ok(Storage::Metal(storage)) } } } pub(crate) fn storage_owned<S: WithDType>(&self, data: Vec<S>) -> Result<Storage> { match self { Device::Cpu => Ok(Storage::Cpu(S::to_cpu_storage_owned(data))), Device::Cuda(device) => { let storage = S::to_cpu_storage_owned(data); let storage = device.storage_from_cpu_storage_owned(storage)?; Ok(Storage::Cuda(storage)) } Device::Metal(device) => { let storage = S::to_cpu_storage_owned(data); let storage = device.storage_from_cpu_storage_owned(storage)?; Ok(Storage::Metal(storage)) } } } pub fn synchronize(&self) -> Result<()> { match self { Self::Cpu => Ok(()), Self::Cuda(d) => d.synchronize(), Self::Metal(d) => d.synchronize(), } } }
candle/candle-core/src/device.rs/0
{ "file_path": "candle/candle-core/src/device.rs", "repo_id": "candle", "token_count": 6435 }
use super::{GgmlDType, QStorage}; use crate::quantized::k_quants::GgmlType; use crate::{backend::BackendDevice, cuda_backend::WrapErr}; use crate::{CudaDevice, CudaStorage, Result}; use half::f16; use cudarc::driver::{CudaSlice, CudaView, DeviceSlice}; #[derive(Clone, Debug)] struct PaddedCudaSlice { inner: CudaSlice<u8>, len: usize, } #[derive(Clone, Debug)] pub struct QCudaStorage { data: PaddedCudaSlice, dtype: GgmlDType, device: CudaDevice, } static FORCE_DMMV: std::sync::atomic::AtomicBool = std::sync::atomic::AtomicBool::new(false); pub fn set_force_dmmv(f: bool) { FORCE_DMMV.store(f, std::sync::atomic::Ordering::Relaxed) } pub const WARP_SIZE: usize = 32; pub const MMQ_X_Q4_0_AMPERE: usize = 4; pub const MMQ_Y_Q4_0_AMPERE: usize = 32; pub const NWARPS_Q4_0_AMPERE: usize = 4; pub const GGML_CUDA_MMV_X: usize = 32; pub const GGML_CUDA_MMV_Y: usize = 1; pub const CUDA_QUANTIZE_BLOCK_SIZE: usize = 256; pub const CUDA_DEQUANTIZE_BLOCK_SIZE: usize = 256; pub const MATRIX_ROW_PADDING: usize = 512; fn ceil_div(p: usize, q: usize) -> usize { p.div_ceil(q) } fn pad(p: usize, q: usize) -> usize { ceil_div(p, q) * q } fn quantize_q8_1( src: &CudaView<f32>, dst: &mut CudaSlice<u8>, elem_count: usize, ky: usize, dev: &CudaDevice, ) -> Result<()> { use cudarc::driver::LaunchAsync; let kx = elem_count; let kx_padded = pad(kx, MATRIX_ROW_PADDING); let num_blocks = ceil_div(kx_padded, CUDA_QUANTIZE_BLOCK_SIZE); let func = dev.get_or_load_func("quantize_q8_1", candle_kernels::QUANTIZED)?; let cfg = cudarc::driver::LaunchConfig { grid_dim: (num_blocks as u32, ky as u32, 1), block_dim: (CUDA_QUANTIZE_BLOCK_SIZE as u32, 1, 1), shared_mem_bytes: 0, }; let params = (src, dst, kx as i32, kx_padded as i32); unsafe { func.launch(cfg, params) }.w()?; Ok(()) } fn dequantize_f32( data: &PaddedCudaSlice, dtype: GgmlDType, elem_count: usize, dev: &CudaDevice, ) -> Result<CudaStorage> { use cudarc::driver::LaunchAsync; let nb = (elem_count + 255) / 256; let (kernel_name, is_k, block_dim, num_blocks) = match dtype { GgmlDType::Q4_0 => ("dequantize_block_q4_0_f32", false, 32, nb), GgmlDType::Q4_1 => ("dequantize_block_q4_1_f32", false, 32, nb), GgmlDType::Q5_0 => ( "dequantize_block_q5_0_f32", false, CUDA_DEQUANTIZE_BLOCK_SIZE, ceil_div(elem_count, 2 * CUDA_DEQUANTIZE_BLOCK_SIZE), ), GgmlDType::Q5_1 => ( "dequantize_block_q5_1_f32", false, CUDA_DEQUANTIZE_BLOCK_SIZE, ceil_div(elem_count, 2 * CUDA_DEQUANTIZE_BLOCK_SIZE), ), GgmlDType::Q8_0 => ("dequantize_block_q8_0_f32", false, 32, nb), GgmlDType::Q2K => ("dequantize_block_q2_K_f32", true, 64, nb), GgmlDType::Q3K => ("dequantize_block_q3_K_f32", true, 64, nb), GgmlDType::Q4K => ("dequantize_block_q4_K_f32", true, 32, nb), GgmlDType::Q5K => ("dequantize_block_q5_K_f32", true, 64, nb), GgmlDType::Q6K => ("dequantize_block_q6_K_f32", true, 64, nb), GgmlDType::Q8K => ("dequantize_block_q8_K_f32", true, 32, nb), _ => crate::bail!("unsupported dtype for dequantize {dtype:?}"), }; let func = dev.get_or_load_func(kernel_name, candle_kernels::QUANTIZED)?; let dst = unsafe { dev.alloc::<f32>(elem_count).w()? }; // See e.g. 
// https://github.com/ggerganov/llama.cpp/blob/cbbd1efa06f8c09f9dff58ff9d9af509cc4c152b/ggml-cuda.cu#L7270 let cfg = cudarc::driver::LaunchConfig { grid_dim: (num_blocks as u32, 1, 1), block_dim: (block_dim as u32, 1, 1), shared_mem_bytes: 0, }; if is_k { let params = (&data.inner, &dst); unsafe { func.launch(cfg, params) }.w()?; } else { let nb32 = match dtype { GgmlDType::Q5_0 | GgmlDType::Q5_1 => elem_count, _ => elem_count / 32, }; let params = (&data.inner, &dst, nb32 as i32); unsafe { func.launch(cfg, params) }.w()?; } Ok(CudaStorage::wrap_cuda_slice(dst, dev.clone())) } fn dequantize_f16( data: &PaddedCudaSlice, dtype: GgmlDType, elem_count: usize, dev: &CudaDevice, ) -> Result<CudaStorage> { use cudarc::driver::LaunchAsync; let nb = (elem_count + 255) / 256; let (kernel_name, is_k, block_dim, num_blocks) = match dtype { GgmlDType::Q4_0 => ("dequantize_block_q4_0_f16", false, 32, nb), GgmlDType::Q4_1 => ("dequantize_block_q4_1_f16", false, 32, nb), GgmlDType::Q5_0 => ( "dequantize_block_q5_0_f16", false, CUDA_DEQUANTIZE_BLOCK_SIZE, ceil_div(elem_count, 2 * CUDA_DEQUANTIZE_BLOCK_SIZE), ), GgmlDType::Q5_1 => ( "dequantize_block_q5_1_f16", false, CUDA_DEQUANTIZE_BLOCK_SIZE, ceil_div(elem_count, 2 * CUDA_DEQUANTIZE_BLOCK_SIZE), ), GgmlDType::Q8_0 => ("dequantize_block_q8_0_f16", false, 32, nb), GgmlDType::Q2K => ("dequantize_block_q2_K_f16", true, 64, nb), GgmlDType::Q3K => ("dequantize_block_q3_K_f16", true, 64, nb), GgmlDType::Q4K => ("dequantize_block_q4_K_f16", true, 32, nb), GgmlDType::Q5K => ("dequantize_block_q5_K_f16", true, 64, nb), GgmlDType::Q6K => ("dequantize_block_q6_K_f16", true, 64, nb), GgmlDType::Q8K => ("dequantize_block_q8_K_f16", true, 32, nb), _ => crate::bail!("unsupported dtype for dequantize {dtype:?}"), }; let func = dev.get_or_load_func(kernel_name, candle_kernels::QUANTIZED)?; let dst = unsafe { dev.alloc::<f16>(elem_count).w()? }; // See e.g. 
// https://github.com/ggerganov/llama.cpp/blob/cbbd1efa06f8c09f9dff58ff9d9af509cc4c152b/ggml-cuda.cu#L7270 let cfg = cudarc::driver::LaunchConfig { grid_dim: (num_blocks as u32, 1, 1), block_dim: (block_dim as u32, 1, 1), shared_mem_bytes: 0, }; if is_k { let params = (&data.inner, &dst); unsafe { func.launch(cfg, params) }.w()?; } else { let nb32 = match dtype { GgmlDType::Q5_0 | GgmlDType::Q5_1 => elem_count, _ => elem_count / 32, }; let params = (&data.inner, &dst, nb32 as i32); unsafe { func.launch(cfg, params) }.w()?; } Ok(CudaStorage::wrap_cuda_slice(dst, dev.clone())) } fn dequantize_mul_mat_vec( data: &PaddedCudaSlice, y: &CudaView<f32>, dtype: GgmlDType, ncols: usize, nrows: usize, dev: &CudaDevice, ) -> Result<CudaStorage> { use cudarc::driver::LaunchAsync; let data_elems = data.len / dtype.type_size() * dtype.block_size(); if data_elems < ncols * nrows { crate::bail!("unexpected data size {}, ncols {ncols} {nrows}", data_elems) } if y.len() != ncols { crate::bail!("unexpected y size {}, ncols {ncols} {nrows}", y.len()) } let kernel_name = match dtype { GgmlDType::Q4_0 => "dequantize_mul_mat_vec_q4_0_cuda", GgmlDType::Q4_1 => "dequantize_mul_mat_vec_q4_1_cuda", GgmlDType::Q5_0 => "dequantize_mul_mat_vec_q5_0_cuda", GgmlDType::Q5_1 => "dequantize_mul_mat_vec_q5_1_cuda", GgmlDType::Q8_0 => "dequantize_mul_mat_vec_q8_0_cuda", GgmlDType::Q2K => "dequantize_mul_mat_vec_q2_k", GgmlDType::Q3K => "dequantize_mul_mat_vec_q3_k", GgmlDType::Q4K => "dequantize_mul_mat_vec_q4_k", GgmlDType::Q5K => "dequantize_mul_mat_vec_q5_k", GgmlDType::Q6K => "dequantize_mul_mat_vec_q6_k", _ => crate::bail!("unsupported dtype for quantized matmul {dtype:?}"), }; let func = dev.get_or_load_func(kernel_name, candle_kernels::QUANTIZED)?; let dst = unsafe { dev.alloc::<f32>(nrows).w()? }; let block_num_y = ceil_div(nrows, GGML_CUDA_MMV_Y); let cfg = cudarc::driver::LaunchConfig { grid_dim: (block_num_y as u32, 1, 1), block_dim: (WARP_SIZE as u32, GGML_CUDA_MMV_Y as u32, 1), shared_mem_bytes: 0, }; let params = (&data.inner, y, &dst, ncols as i32, nrows as i32); unsafe { func.launch(cfg, params) }.w()?; Ok(CudaStorage::wrap_cuda_slice(dst, dev.clone())) } fn mul_mat_vec_via_q8_1( data: &PaddedCudaSlice, y: &CudaView<f32>, dtype: GgmlDType, ncols: usize, nrows: usize, b_size: usize, dev: &CudaDevice, ) -> Result<CudaStorage> { use cudarc::driver::LaunchAsync; let data_elems = data.len / dtype.type_size() * dtype.block_size(); if data_elems < ncols * nrows { crate::bail!("unexpected data size {}, ncols {ncols} {nrows}", data_elems) } if y.len() != ncols * b_size { crate::bail!("unexpected y size {}, ncols {ncols} {nrows}", y.len()) } if b_size == 0 || b_size > 8 { crate::bail!("only bsize between 1 and 8 are supported, got {b_size}") } // Start by quantizing y let ncols_padded = pad(ncols, MATRIX_ROW_PADDING); let y_size_in_bytes = b_size * ncols_padded * GgmlDType::Q8_1.type_size() / GgmlDType::Q8_1.block_size(); let mut y_q8_1 = unsafe { dev.alloc::<u8>(y_size_in_bytes).w()? 
}; quantize_q8_1(y, &mut y_q8_1, ncols, b_size, dev)?; let kernel_name = match dtype { GgmlDType::Q4_0 => "mul_mat_vec_q4_0_q8_1_cuda", GgmlDType::Q4_1 => "mul_mat_vec_q4_1_q8_1_cuda", GgmlDType::Q5_0 => "mul_mat_vec_q5_0_q8_1_cuda", GgmlDType::Q5_1 => "mul_mat_vec_q5_1_q8_1_cuda", GgmlDType::Q8_0 => "mul_mat_vec_q8_0_q8_1_cuda", GgmlDType::Q2K => "mul_mat_vec_q2_K_q8_1_cuda", GgmlDType::Q3K => "mul_mat_vec_q3_K_q8_1_cuda", GgmlDType::Q4K => "mul_mat_vec_q4_K_q8_1_cuda", GgmlDType::Q5K => "mul_mat_vec_q5_K_q8_1_cuda", GgmlDType::Q6K => "mul_mat_vec_q6_K_q8_1_cuda", _ => crate::bail!("unsupported dtype for quantized matmul {dtype:?}"), }; let kernel_name = format!("{kernel_name}{b_size}"); let func = dev.get_or_load_func(&kernel_name, candle_kernels::QUANTIZED)?; let dst = unsafe { dev.alloc::<f32>(nrows * b_size).w()? }; // https://github.com/ggerganov/llama.cpp/blob/facb8b56f8fd3bb10a693bf0943ae9d69d0828ef/ggml-cuda/mmvq.cu#L98 let (nblocks, nwarps) = match b_size { 1 => (nrows as u32, 4), 2..=4 => ((nrows as u32 + 1) / 2, 4), 5..=8 => ((nrows as u32 + 1) / 2, 2), _ => crate::bail!("unexpected bsize {b_size}"), }; let cfg = cudarc::driver::LaunchConfig { grid_dim: (nblocks, 1, 1), block_dim: (WARP_SIZE as u32, nwarps, 1), shared_mem_bytes: 0, }; let params = ( &data.inner, &y_q8_1, &dst, /* ncols_x */ ncols as i32, /* nrows_x */ nrows as i32, /* nrows_y */ ncols_padded as i32, /* nrows_dst */ nrows as i32, ); unsafe { func.launch(cfg, params) }.w()?; Ok(CudaStorage::wrap_cuda_slice(dst, dev.clone())) } #[allow(clippy::too_many_arguments)] fn mul_mat_via_q8_1( data: &PaddedCudaSlice, y: &CudaView<f32>, dtype: GgmlDType, x_rows: usize, x_cols: usize, y_rows: usize, y_cols: usize, dev: &CudaDevice, ) -> Result<CudaStorage> { use cudarc::driver::LaunchAsync; let data_elems = data.len / dtype.type_size() * dtype.block_size(); if data_elems < x_rows * x_cols { crate::bail!("unexpected lhs size {}, {x_rows} {x_cols}", data_elems) } if y.len() != y_rows * y_cols { crate::bail!("unexpected y size {}, {y_rows} {y_cols}", y.len()) } if x_cols != y_rows { crate::bail!("unexpected x/y size {x_rows} {x_cols} {y_rows} {y_cols}") } let k = x_cols; // Start by quantizing y let k_padded = pad(k, MATRIX_ROW_PADDING); let y_size_in_bytes = k_padded * y_cols * GgmlDType::Q8_1.type_size() / GgmlDType::Q8_1.block_size(); let mut y_q8_1 = unsafe { dev.alloc::<u8>(y_size_in_bytes).w()? }; quantize_q8_1(y, &mut y_q8_1, k, y_cols, dev)?; let (kernel_name, mmq_x, mmq_y) = match dtype { GgmlDType::Q4_0 => ("mul_mat_q4_0", 64, 128), GgmlDType::Q4_1 => ("mul_mat_q4_1", 64, 128), GgmlDType::Q5_0 => ("mul_mat_q5_0", 128, 64), GgmlDType::Q5_1 => ("mul_mat_q5_1", 128, 64), GgmlDType::Q8_0 => ("mul_mat_q8_0", 128, 64), GgmlDType::Q2K => ("mul_mat_q2_K", 64, 128), GgmlDType::Q3K => ("mul_mat_q3_K", 128, 128), GgmlDType::Q4K => ("mul_mat_q4_K", 64, 128), GgmlDType::Q5K => ("mul_mat_q5_K", 64, 128), GgmlDType::Q6K => ("mul_mat_q6_K", 64, 64), _ => crate::bail!("unsupported dtype for quantized matmul {dtype:?}"), }; let func = dev.get_or_load_func(kernel_name, candle_kernels::QUANTIZED)?; let dst = unsafe { dev.alloc::<f32>(x_rows * y_cols).w()? 
}; let cfg = cudarc::driver::LaunchConfig { grid_dim: ( ceil_div(x_rows, mmq_y) as u32, ceil_div(y_cols, mmq_x) as u32, 1, ), block_dim: (WARP_SIZE as u32, 4, 1), shared_mem_bytes: 0, }; let params = ( /* vx */ &data.inner, /* vy */ &y_q8_1, /* dst */ &dst, /* ncols_x */ x_cols as i32, /* nrows_x */ x_rows as i32, /* ncols_y */ y_cols as i32, /* nrows_y */ k_padded as i32, /* nrows_dst */ x_rows as i32, ); unsafe { func.launch(cfg, params) }.w()?; Ok(CudaStorage::wrap_cuda_slice(dst, dev.clone())) } impl QCudaStorage { pub fn zeros(device: &CudaDevice, el_count: usize, dtype: GgmlDType) -> Result<Self> { let size_in_bytes = ceil_div(el_count, dtype.block_size()) * dtype.type_size(); let padded_size_in_bytes = ceil_div(el_count + MATRIX_ROW_PADDING, dtype.block_size()) * dtype.type_size(); let inner = device.alloc_zeros::<u8>(padded_size_in_bytes).w()?; Ok(QCudaStorage { data: PaddedCudaSlice { inner, len: size_in_bytes, }, device: device.clone(), dtype, }) } pub fn dtype(&self) -> GgmlDType { self.dtype } pub fn device(&self) -> &CudaDevice { &self.device } pub fn dequantize(&self, elem_count: usize) -> Result<CudaStorage> { fn deq<T: GgmlType>(buffer: &[u8], n: usize, dst: &mut [f32]) -> Result<()> { let slice = unsafe { std::slice::from_raw_parts(buffer.as_ptr() as *const T, n) }; let vec = slice.to_vec(); T::to_float(&vec, dst) } let fast_kernel = matches!( self.dtype, GgmlDType::Q4_0 | GgmlDType::Q4_1 | GgmlDType::Q5_0 | GgmlDType::Q5_1 | GgmlDType::Q8_0 | GgmlDType::Q2K | GgmlDType::Q3K | GgmlDType::Q4K | GgmlDType::Q5K | GgmlDType::Q6K | GgmlDType::Q8K ); if fast_kernel { return dequantize_f32(&self.data, self.dtype, elem_count, self.device()); } // Run the dequantization on cpu. let buffer = self .device .dtoh_sync_copy(&self.data.inner.slice(..self.data.len)) .w()?; let mut out = vec![0.0; elem_count]; let block_len = elem_count / self.dtype.block_size(); match self.dtype { GgmlDType::F32 => deq::<f32>(&buffer, block_len, &mut out)?, GgmlDType::F16 => deq::<half::f16>(&buffer, block_len, &mut out)?, GgmlDType::Q4_0 => deq::<crate::quantized::BlockQ4_0>(&buffer, block_len, &mut out)?, GgmlDType::Q4_1 => deq::<crate::quantized::BlockQ4_1>(&buffer, block_len, &mut out)?, GgmlDType::Q5_0 => deq::<crate::quantized::BlockQ5_0>(&buffer, block_len, &mut out)?, GgmlDType::Q5_1 => deq::<crate::quantized::BlockQ5_1>(&buffer, block_len, &mut out)?, GgmlDType::Q8_0 => deq::<crate::quantized::BlockQ8_0>(&buffer, block_len, &mut out)?, GgmlDType::Q8_1 => deq::<crate::quantized::BlockQ8_1>(&buffer, block_len, &mut out)?, GgmlDType::Q2K => deq::<crate::quantized::BlockQ2K>(&buffer, block_len, &mut out)?, GgmlDType::Q3K => deq::<crate::quantized::BlockQ3K>(&buffer, block_len, &mut out)?, GgmlDType::Q4K => deq::<crate::quantized::BlockQ4K>(&buffer, block_len, &mut out)?, GgmlDType::Q5K => deq::<crate::quantized::BlockQ5K>(&buffer, block_len, &mut out)?, GgmlDType::Q6K => deq::<crate::quantized::BlockQ6K>(&buffer, block_len, &mut out)?, GgmlDType::Q8K => deq::<crate::quantized::BlockQ8K>(&buffer, block_len, &mut out)?, } self.device .storage_from_cpu_storage(&crate::CpuStorage::F32(out)) } pub fn dequantize_f16(&self, elem_count: usize) -> Result<CudaStorage> { dequantize_f16(&self.data, self.dtype, elem_count, self.device()) } pub fn quantize(&mut self, src: &CudaStorage) -> Result<()> { // Run the quantization on cpu. let src = match &src.slice { crate::cuda_backend::CudaStorageSlice::F32(data) => { self.device.dtoh_sync_copy(data).w()? 
} _ => crate::bail!("only f32 can be quantized"), }; let src_len = src.len(); let src = crate::Storage::Cpu(crate::CpuStorage::F32(src)); let mut qcpu_storage = crate::Device::Cpu.qzeros(src_len, self.dtype)?; qcpu_storage.quantize(&src)?; let data = qcpu_storage.data()?; let padded_len = data.len() + MATRIX_ROW_PADDING * self.dtype.type_size() / self.dtype.block_size(); let mut inner = unsafe { self.device.alloc::<u8>(padded_len).w()? }; self.device .htod_sync_copy_into(data.as_ref(), &mut inner.slice_mut(..data.len())) .w()?; self.data = PaddedCudaSlice { inner, len: data.len(), }; Ok(()) } pub fn storage_size_in_bytes(&self) -> usize { self.data.len } pub fn fwd( &self, self_shape: &crate::Shape, storage: &CudaStorage, layout: &crate::Layout, ) -> Result<(CudaStorage, crate::Shape)> { let max_bm = if FORCE_DMMV.load(std::sync::atomic::Ordering::Relaxed) { 1 } else { 8 }; let use_vec_kernel = match layout.shape().dims() { [b, m, _k] => b * m <= max_bm, [b, _k] => *b <= max_bm, _ => false, }; if use_vec_kernel { self.dequantize_matmul_vec(self_shape, storage, layout) } else { self.dequantize_matmul(self_shape, storage, layout) } } } impl QCudaStorage { fn dequantize_matmul_vec( &self, self_shape: &crate::Shape, rhs: &CudaStorage, rhs_l: &crate::Layout, ) -> Result<(CudaStorage, crate::Shape)> { let (nrows, ncols) = self_shape.dims2()?; let rhs = rhs.as_cuda_slice::<f32>()?; let rhs = match rhs_l.contiguous_offsets() { Some((o1, o2)) => rhs.slice(o1..o2), None => Err(crate::Error::RequiresContiguous { op: "dmmv" }.bt())?, }; let (b_size, k) = match rhs_l.shape().dims() { [b, m, k] => (b * m, *k), [b, k] => (*b, *k), _ => crate::bail!("unexpected rhs shape in dmmv {:?}", rhs_l.shape()), }; if ncols != k { crate::bail!("mismatch on matmul dim {self_shape:?} {:?}", rhs_l.shape()) } let out = if FORCE_DMMV.load(std::sync::atomic::Ordering::Relaxed) { dequantize_mul_mat_vec(&self.data, &rhs, self.dtype, ncols, nrows, self.device())? } else { mul_mat_vec_via_q8_1( &self.data, &rhs, self.dtype, ncols, nrows, b_size, self.device(), )? }; let mut out_shape = rhs_l.shape().dims().to_vec(); out_shape.pop(); out_shape.push(nrows); Ok((out, out_shape.into())) } fn dequantize_matmul( &self, self_shape: &crate::Shape, storage: &CudaStorage, layout: &crate::Layout, ) -> Result<(CudaStorage, crate::Shape)> { use crate::backend::BackendStorage; let (n, k) = self_shape.dims2()?; let (b, m, k2) = match layout.shape().dims() { &[b, m, k2] => (b, m, k2), &[m, k2] => (1, m, k2), s => crate::bail!("unexpected shape for input {s:?}"), }; if k2 != k { crate::bail!("mismatch on matmul dim {self_shape:?} {:?}", layout.shape()) } let out = if FORCE_DMMV.load(std::sync::atomic::Ordering::Relaxed) { let data_f32 = self.dequantize(n * k)?; let rhs_l = crate::Layout::new((k, n).into(), vec![1, k], 0).broadcast_as((b, k, n))?; storage.matmul(&data_f32, (b, m, n, k), layout, &rhs_l)? } else { let storage = storage.as_cuda_slice::<f32>()?; let storage = match layout.contiguous_offsets() { Some((o1, o2)) => storage.slice(o1..o2), None => Err(crate::Error::RequiresContiguous { op: "quantized-matmul", } .bt())?, }; mul_mat_via_q8_1( &self.data, &storage, self.dtype, /* x_rows */ n, /* x_cols */ k, /* y_rows */ k, /* y_cols */ b * m, self.device(), )? 
}; let mut out_shape = layout.shape().dims().to_vec(); out_shape.pop(); out_shape.push(n); Ok((out, out_shape.into())) } } pub fn load_quantized<T: super::GgmlType + Send + Sync + 'static>( device: &CudaDevice, data: &[T], ) -> Result<super::QStorage> { let data = unsafe { std::slice::from_raw_parts(data.as_ptr() as *const u8, core::mem::size_of_val(data)) }; let dtype = T::DTYPE; let padded_len = data.len() + MATRIX_ROW_PADDING * dtype.type_size() / dtype.block_size(); let mut inner = unsafe { device.alloc::<u8>(padded_len).w()? }; device .htod_sync_copy_into(data, &mut inner.slice_mut(..data.len())) .w()?; Ok(QStorage::Cuda(QCudaStorage { data: PaddedCudaSlice { inner, len: data.len(), }, device: device.clone(), dtype, })) } #[cfg(test)] mod test { use super::*; #[test] fn cuda_quantize_q8_1() -> Result<()> { let dev = CudaDevice::new(0)?; let el = 256; let el_padded = pad(el, MATRIX_ROW_PADDING); let y_size_in_bytes = el_padded * GgmlDType::Q8_1.type_size() / GgmlDType::Q8_1.block_size(); let mut y_q8_1 = unsafe { dev.alloc::<u8>(y_size_in_bytes).w()? }; let vs: Vec<f32> = (0..el).map(|v| v as f32).collect(); let y = dev.htod_sync_copy(&vs).w()?; quantize_q8_1(&y.slice(..), &mut y_q8_1, el, 1, &dev)?; Ok(()) } #[test] fn cuda_mmv_q8_1() -> Result<()> { let dev = CudaDevice::new(0)?; let ncols = 256; let vs: Vec<f32> = (0..ncols).map(|v| v as f32).collect(); let y = dev.htod_sync_copy(&vs).w()?; let mut xs = QCudaStorage::zeros(&dev, ncols, GgmlDType::Q4_0)?; xs.quantize(&CudaStorage::wrap_cuda_slice(y.clone(), dev.clone()))?; let cuda_storage = mul_mat_vec_via_q8_1( &xs.data, &y.slice(..), /* dtype */ GgmlDType::Q4_0, /* ncols */ ncols, /* nrows */ 1, /* b_size */ 1, &dev, )?; let vs = cuda_storage.as_cuda_slice::<f32>()?; let vs = dev.dtoh_sync_copy(&vs.slice(..)).unwrap(); assert_eq!(vs.len(), 1); // for n = 255, n.(n+1).(2n+1) / 6 = 5559680 // Q8 means 1/256 precision. assert_eq!(vs[0], 5561664.5); let cuda_storage = dequantize_mul_mat_vec( &xs.data, &y.slice(..), /* dtype */ GgmlDType::Q4_0, /* ncols */ ncols, /* nrows */ 1, &dev, )?; let vs = cuda_storage.as_cuda_slice::<f32>()?; let vs = dev.dtoh_sync_copy(&vs.slice(..)).unwrap(); assert_eq!(vs.len(), 1); assert_eq!(vs[0], 5561851.0); Ok(()) } #[test] fn cuda_mm_q8_1() -> Result<()> { let dev = CudaDevice::new(0)?; let ncols = 256; let vs: Vec<f32> = (0..ncols * 4).map(|v| v as f32 / 4.).collect(); let y = dev.htod_sync_copy(&vs).w()?; let mut xs = QCudaStorage::zeros(&dev, ncols * 4, GgmlDType::Q4_0)?; xs.quantize(&CudaStorage::wrap_cuda_slice(y.clone(), dev.clone()))?; let cuda_storage = mul_mat_via_q8_1( &xs.data, &y.slice(..), /* dtype */ GgmlDType::Q4_0, /* x_rows */ 4, /* x_cols */ ncols, /* y_rows */ ncols, /* y_cols */ 4, &dev, )?; let vs = cuda_storage.as_cuda_slice::<f32>()?; let vs = dev.dtoh_sync_copy(&vs.slice(..)).unwrap(); /* x = torch.tensor([float(v) for v in range(1024)]).reshape(4, 256) x @ x.t() / 16 tensor([[ 347480.0000, 869720.0000, 1391960.0000, 1914200.0000], [ 869720.0000, 2440536.0000, 4011352.0000, 5582166.5000], [ 1391960.0000, 4011352.0000, 6630742.0000, 9250132.0000], [ 1914200.0000, 5582166.5000, 9250132.0000, 12918099.0000]]) */ assert_eq!(vs.len(), 16); assert_eq!(vs[0], 347604.0); assert_eq!(vs[1], 888153.06); assert_eq!(vs[4], 869780.7); assert_eq!(vs[5], 2483145.0); assert_eq!(vs[11], 9407368.0); assert_eq!(vs[14], 9470856.0); assert_eq!(vs[15], 13138824.0); Ok(()) } // The following test used to fail under compute-sanitizer until #2526. 
#[test] fn cuda_mm_q8_1_pad() -> Result<()> { let dev = CudaDevice::new(0)?; let (x_rows, ncols, y_cols) = (4, 16, 2048); let vs: Vec<f32> = (0..ncols * y_cols).map(|v| v as f32 / 256.).collect(); let y = dev.htod_sync_copy(&vs).w()?; let mut xs = QCudaStorage::zeros(&dev, ncols * x_rows, GgmlDType::Q4_0)?; xs.quantize(&CudaStorage::wrap_cuda_slice(y.clone(), dev.clone()))?; let cuda_storage = mul_mat_via_q8_1( &xs.data, &y.slice(..), /* dtype */ GgmlDType::Q4_0, /* x_rows */ x_rows, /* x_cols */ ncols, /* y_rows */ ncols, /* y_cols */ y_cols, &dev, )?; let vs = cuda_storage.as_cuda_slice::<f32>()?; let _vs = dev.dtoh_sync_copy(&vs.slice(..)).unwrap(); Ok(()) } }
candle/candle-core/src/quantized/cuda.rs/0
{ "file_path": "candle/candle-core/src/quantized/cuda.rs", "repo_id": "candle", "token_count": 14789 }
//! StreamTensor: a tensor wrapper useful for streaming ops.
//!
use crate::{Result, Shape, Tensor};

pub trait Dim: crate::shape::Dim + Copy {}
impl<T: crate::shape::Dim + Copy> Dim for T {}

/// A stream tensor is used in streaming modules. It can either contain an actual tensor or be
/// empty.
#[derive(Clone)]
pub struct StreamTensor(Option<Tensor>);

impl std::fmt::Debug for StreamTensor {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match &self.0 {
            Some(t) => write!(f, "{:?}", t.shape()),
            None => write!(f, "Empty"),
        }
    }
}

impl std::convert::From<Option<Tensor>> for StreamTensor {
    fn from(value: Option<Tensor>) -> Self {
        Self(value)
    }
}

impl std::convert::From<Tensor> for StreamTensor {
    fn from(value: Tensor) -> Self {
        Self(Some(value))
    }
}

impl std::convert::From<()> for StreamTensor {
    fn from(_value: ()) -> Self {
        Self(None)
    }
}

impl StreamTensor {
    pub fn empty() -> Self {
        Self(None)
    }

    pub fn from_tensor(tensor: Tensor) -> Self {
        Self(Some(tensor))
    }

    pub fn shape(&self) -> Option<&Shape> {
        self.0.as_ref().map(|t| t.shape())
    }

    pub fn cat2<D: Dim>(&self, rhs: &Self, dim: D) -> Result<Self> {
        let xs = match (&self.0, &rhs.0) {
            (Some(lhs), Some(rhs)) => {
                let xs = Tensor::cat(&[lhs, rhs], dim)?;
                Some(xs)
            }
            (Some(xs), None) | (None, Some(xs)) => Some(xs.clone()),
            (None, None) => None,
        };
        Ok(Self(xs))
    }

    pub fn seq_len<D: Dim>(&self, dim: D) -> Result<usize> {
        match &self.0 {
            None => Ok(0),
            Some(v) => v.dim(dim),
        }
    }

    pub fn reset(&mut self) {
        self.0 = None
    }

    pub fn narrow<D: Dim>(&self, dim: D, offset: usize, len: usize) -> Result<StreamTensor> {
        let t = match &self.0 {
            None => None,
            Some(t) => {
                let seq_len = t.dim(dim)?;
                if seq_len <= offset {
                    None
                } else {
                    let t = t.narrow(dim, offset, usize::min(len, seq_len - offset))?;
                    Some(t)
                }
            }
        };
        Ok(Self(t))
    }

    /// Splits the stream tensor on the time axis `dim`, with the first `lhs_len` elements
    /// returned in the first output and the remaining elements in the second output.
    pub fn split<D: Dim>(&self, dim: D, lhs_len: usize) -> Result<(Self, Self)> {
        match &self.0 {
            None => Ok((Self::empty(), Self::empty())),
            Some(t) => {
                let seq_len = t.dim(dim)?;
                let lhs_len = usize::min(seq_len, lhs_len);
                if lhs_len == 0 {
                    Ok((Self::empty(), t.clone().into()))
                } else {
                    let lhs = Self::from_tensor(t.narrow(dim, 0, lhs_len)?);
                    let rhs_len = seq_len - lhs_len;
                    let rhs = if rhs_len == 0 {
                        Self::empty()
                    } else {
                        Self::from_tensor(t.narrow(dim, lhs_len, rhs_len)?)
                    };
                    Ok((lhs, rhs))
                }
            }
        }
    }

    pub fn as_option(&self) -> Option<&Tensor> {
        self.0.as_ref()
    }

    pub fn apply<M: crate::Module>(&self, m: &M) -> Result<Self> {
        match &self.0 {
            None => Ok(Self::empty()),
            Some(t) => Ok(Self::from_tensor(t.apply(m)?)),
        }
    }
}

/// Streaming modules take as input a stream tensor and return a stream tensor. They may perform
/// some internal buffering so that enough data has been received for the module to be able to
/// perform some operations.
pub trait StreamingModule {
    // TODO: Should we also have a flush method?
fn step(&mut self, xs: &StreamTensor) -> Result<StreamTensor>; fn reset_state(&mut self); } #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub enum BinOp { Add, Mul, Sub, Div, } #[derive(Debug, Clone)] pub struct StreamingBinOp { prev_lhs: StreamTensor, prev_rhs: StreamTensor, pub op: BinOp, pub dim: crate::D, } impl StreamingBinOp { pub fn new(op: BinOp, dim: crate::D) -> Self { Self { prev_lhs: StreamTensor::empty(), prev_rhs: StreamTensor::empty(), op, dim, } } pub fn reset_state(&mut self) { self.prev_lhs.reset(); self.prev_rhs.reset(); } pub fn forward(&self, lhs: &Tensor, rhs: &Tensor) -> Result<Tensor> { match self.op { BinOp::Add => Tensor::add(lhs, rhs), BinOp::Mul => Tensor::mul(lhs, rhs), BinOp::Sub => Tensor::sub(lhs, rhs), BinOp::Div => Tensor::div(lhs, rhs), } } pub fn step(&mut self, lhs: &StreamTensor, rhs: &StreamTensor) -> Result<StreamTensor> { let lhs = StreamTensor::cat2(&self.prev_lhs, lhs, self.dim)?; let rhs = StreamTensor::cat2(&self.prev_rhs, rhs, self.dim)?; let lhs_len = lhs.seq_len(self.dim)?; let rhs_len = rhs.seq_len(self.dim)?; let common_len = usize::min(lhs_len, rhs_len); let (lhs, prev_lhs) = lhs.split(self.dim, common_len)?; let (rhs, prev_rhs) = rhs.split(self.dim, common_len)?; let ys = match (lhs.0, rhs.0) { (Some(lhs), Some(rhs)) => { let ys = self.forward(&lhs, &rhs)?; StreamTensor::from_tensor(ys) } (None, None) => StreamTensor::empty(), (lhs, rhs) => crate::bail!("INTERNAL ERROR inconsistent lhs and rhs {lhs:?} {rhs:?}"), }; self.prev_lhs = prev_lhs; self.prev_rhs = prev_rhs; Ok(ys) } } /// Simple wrapper that doesn't do any buffering. pub struct Map<T: crate::Module>(T); impl<T: crate::Module> StreamingModule for Map<T> { fn reset_state(&mut self) {} fn step(&mut self, xs: &StreamTensor) -> Result<StreamTensor> { xs.apply(&self.0) } }
candle/candle-core/src/streaming.rs/0
{ "file_path": "candle/candle-core/src/streaming.rs", "repo_id": "candle", "token_count": 3130 }
use candle_core::{test_device, test_utils, Device, IndexOp, Result, Tensor}; // https://github.com/huggingface/candle/issues/364 fn avg_pool2d(dev: &Device) -> Result<()> { let data: Vec<f32> = vec![ 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., ]; let t = Tensor::from_vec(data, (1, 1, 4, 4), dev)?; let pool = t.avg_pool2d(2)?.squeeze(0)?.squeeze(0)?; assert_eq!(pool.to_vec2::<f32>()?, [[0.5f32, 1.], [1., 1.]]); let data: Vec<f32> = vec![ 1., 2., 1., 3., 0., 0., 1., 1., 1., 1., 1., 1., 5., 1., 1., 1., ]; let t = Tensor::from_vec(data, (1, 1, 2, 8), dev)?; let pool = t.avg_pool2d(2)?.squeeze(0)?.squeeze(0)?; assert_eq!(pool.to_vec2::<f32>()?, [[5. / 4., 6. / 4., 6. / 4., 1.]]); Ok(()) } fn max_pool2d(dev: &Device) -> Result<()> { let data: Vec<f32> = vec![ 1., 2., 1., 3., 0., 0., 1., 1., 1., 1., 1., 1., 5., 1., 1., 1., ]; let t = Tensor::from_vec(data, (1, 1, 4, 4), dev)?; let pool = t.max_pool2d(2)?.squeeze(0)?.squeeze(0)?; assert_eq!(pool.to_vec2::<f32>()?, [[2f32, 3.], [5., 1.]]); let t = t.reshape((1, 1, 2, 8))?; let pool = t.max_pool2d(2)?.squeeze(0)?.squeeze(0)?; assert_eq!(pool.to_vec2::<f32>()?, [[2.0, 3.0, 5.0, 1.0]]); Ok(()) } /* This test corresponds to the following PyTorch script. import torch torch.manual_seed(4242) t = torch.randn((1, 2, 4, 4)) print(t.flatten()) res = torch.nn.functional.avg_pool2d(t, 2) print(res) */ fn avg_pool2d_pytorch(dev: &Device) -> Result<()> { if dev.is_metal() { return Ok(()); } let t = Tensor::new( &[ 0.4056f32, -0.8689, -0.0773, -1.5630, -2.8012, -1.5059, 0.3972, 1.0852, 0.4997, 3.0616, 1.6541, 0.0964, -0.8338, -1.6523, -0.8323, -0.1699, 0.0823, 0.3526, 0.6843, 0.2395, 1.2279, -0.9287, -1.7030, 0.1370, 0.6047, 0.3770, -0.6266, 0.3529, 2.2013, -0.6836, 0.2477, 1.3127, ], dev, )? .reshape((1, 2, 4, 4))?; let pool = t.avg_pool2d(2)?.squeeze(0)?; assert_eq!( test_utils::to_vec3_round(&pool, 4)?, [ [[-1.1926, -0.0395], [0.2688, 0.1871]], [[0.1835, -0.1606], [0.6249, 0.3217]] ] ); let pool = t.avg_pool2d(3)?.squeeze(0)?; assert_eq!( test_utils::to_vec3_round(&pool, 4)?, [[[0.085]], [[0.0078]]] ); let t = t.reshape((1, 1, 4, 8))?; let pool = t.avg_pool2d(2)?.squeeze(0)?.squeeze(0)?; assert_eq!( test_utils::to_vec2_round(&pool, 4)?, [ [0.7745, 0.0276, -1.6983, 0.12], [0.3542, 0.1625, 0.4542, -0.0014] ] ); Ok(()) } fn upsample_nearest2d(dev: &Device) -> Result<()> { let t = Tensor::arange(0f32, 6f32, dev)?.reshape((1, 1, 2, 3))?; let upsampled = t.upsample_nearest2d(4, 6)?.i(0)?.i(0)?; assert_eq!( t.i(0)?.i(0)?.to_vec2::<f32>()?, [[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]] ); assert_eq!( upsampled.to_vec2::<f32>()?, [ [0.0, 0.0, 1.0, 1.0, 2.0, 2.0], [0.0, 0.0, 1.0, 1.0, 2.0, 2.0], [3.0, 3.0, 4.0, 4.0, 5.0, 5.0], [3.0, 3.0, 4.0, 4.0, 5.0, 5.0] ] ); Ok(()) } test_device!(avg_pool2d, avg_pool2d_cpu, avg_pool2d_gpu, avg_pool2d_metal); test_device!( avg_pool2d_pytorch, avg_pool2d_pytorch_cpu, avg_pool2d_pytorch_gpu, avg_pool2d_pytorch_metal ); test_device!(max_pool2d, max_pool2d_cpu, max_pool2d_gpu, max_pool2d_metal); test_device!( upsample_nearest2d, upsample_nearest2d_cpu, upsample_nearest2d_gpu, upsample_nearest2d_metal );
candle/candle-core/tests/pool_tests.rs/0
{ "file_path": "candle/candle-core/tests/pool_tests.rs", "repo_id": "candle", "token_count": 2112 }
//! Helper functions for the tinystories dataset. This uses the pre-tokenized version as generated //! by the tools from https://github.com/karpathy/llama2.c use candle::{Device, Result, Tensor}; pub struct Dataset { valid_tokens: Vec<memmap2::Mmap>, train_tokens: Vec<memmap2::Mmap>, } fn mmap_file(p: &std::path::PathBuf) -> Result<memmap2::Mmap> { let file = std::fs::File::open(p)?; let mmap = unsafe { memmap2::MmapOptions::new().map(&file)? }; Ok(mmap) } impl Dataset { pub fn new<P: AsRef<std::path::Path>>(dir: P) -> Result<Self> { let dir = dir.as_ref(); let mut bin_files = vec![]; for file in std::fs::read_dir(dir)?.flatten() { let file = file.path(); if let Some(extension) = file.extension() { if extension == "bin" { bin_files.push(file) } } } if bin_files.len() < 2 { candle::bail!("found less than two bin files in {:?}", dir) } bin_files.sort(); let valid_tokens = mmap_file(&bin_files[0])?; let train_tokens = bin_files[1..] .iter() .map(mmap_file) .collect::<Result<Vec<_>>>()?; Ok(Self { valid_tokens: vec![valid_tokens], train_tokens, }) } pub fn train_tokens(&self) -> usize { self.train_tokens.len() } pub fn valid_tokens(&self) -> usize { self.valid_tokens.len() } } pub struct DatasetRandomIter<'a> { all_tokens: &'a [memmap2::Mmap], tokens: Vec<&'a memmap2::Mmap>, current_tokens: &'a memmap2::Mmap, indexes_in_bytes: Vec<usize>, seq_len: usize, device: Device, } impl<'a> DatasetRandomIter<'a> { pub fn new(ds: &'a Dataset, valid: bool, seq_len: usize, device: Device) -> Self { use rand::seq::SliceRandom; use rand::thread_rng; let all_tokens = if valid { &ds.valid_tokens } else { &ds.train_tokens }; let mut tokens = all_tokens.iter().collect::<Vec<_>>(); tokens.shuffle(&mut thread_rng()); let current_tokens = tokens.pop().unwrap(); let seq_len_in_bytes = seq_len * 2; let mut indexes_in_bytes = (0..current_tokens.len() - seq_len_in_bytes) .step_by(seq_len_in_bytes) .collect::<Vec<_>>(); indexes_in_bytes.shuffle(&mut thread_rng()); Self { all_tokens, tokens, current_tokens, indexes_in_bytes, seq_len, device, } } } impl Iterator for DatasetRandomIter<'_> { type Item = Result<(Tensor, Tensor)>; fn next(&mut self) -> Option<Self::Item> { use byteorder::{LittleEndian, ReadBytesExt}; use rand::seq::SliceRandom; use rand::thread_rng; let seq_len = self.seq_len; if self.indexes_in_bytes.is_empty() { if self.tokens.is_empty() { self.tokens = self.all_tokens.iter().collect(); self.tokens.shuffle(&mut thread_rng()); } self.current_tokens = self.tokens.pop().unwrap(); let seq_len_in_bytes = self.seq_len * 2; self.indexes_in_bytes = (0..self.current_tokens.len() - seq_len_in_bytes) .step_by(seq_len_in_bytes) .collect::<Vec<_>>(); self.indexes_in_bytes.shuffle(&mut thread_rng()); } let start_idx = self.indexes_in_bytes.pop().unwrap(); let bytes = &self.current_tokens[start_idx..start_idx + 2 * (seq_len + 1)]; let mut tokens = vec![0u16; bytes.len() / 2]; if let Err(err) = std::io::Cursor::new(bytes).read_u16_into::<LittleEndian>(&mut tokens) { return Some(Err(err.into())); } let tokens = tokens.into_iter().map(|v| v as u32).collect::<Vec<_>>(); let inputs = Tensor::new(&tokens[..seq_len], &self.device); let targets = Tensor::new(&tokens[1..], &self.device); Some(candle::error::zip(inputs, targets)) } }
candle/candle-datasets/src/nlp/tinystories.rs/0
{ "file_path": "candle/candle-datasets/src/nlp/tinystories.rs", "repo_id": "candle", "token_count": 2092 }
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use anyhow::Error as E; use clap::Parser; use candle::{DType, Device, Result, Tensor}; use candle_examples::token_output_stream::TokenOutputStream; use candle_nn::VarBuilder; use candle_transformers::models::blip; use candle_transformers::models::quantized_blip; use tokenizers::Tokenizer; enum Model { M(blip::BlipForConditionalGeneration), Q(quantized_blip::BlipForConditionalGeneration), } impl Model { fn text_decoder_forward(&mut self, xs: &Tensor, img_xs: &Tensor) -> Result<Tensor> { match self { Self::M(m) => m.text_decoder().forward(xs, img_xs), Self::Q(m) => m.text_decoder().forward(xs, img_xs), } } } // TODO: Maybe add support for the conditional prompt. #[derive(Parser)] struct Args { #[arg(long)] model: Option<String>, #[arg(long)] tokenizer: Option<String>, #[arg(long)] image: String, /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, /// Use the quantized version of the model. #[arg(long)] quantized: bool, } const SEP_TOKEN_ID: u32 = 102; /// Loads an image from disk using the image crate, this returns a tensor with shape /// (3, 384, 384). OpenAI normalization is applied. pub fn load_image<P: AsRef<std::path::Path>>(p: P) -> Result<Tensor> { let img = image::ImageReader::open(p)? .decode() .map_err(candle::Error::wrap)? .resize_to_fill(384, 384, image::imageops::FilterType::Triangle); let img = img.to_rgb8(); let data = img.into_raw(); let data = Tensor::from_vec(data, (384, 384, 3), &Device::Cpu)?.permute((2, 0, 1))?; let mean = Tensor::new(&[0.48145466f32, 0.4578275, 0.40821073], &Device::Cpu)?.reshape((3, 1, 1))?; let std = Tensor::new(&[0.26862954f32, 0.261_302_6, 0.275_777_1], &Device::Cpu)? .reshape((3, 1, 1))?; (data.to_dtype(candle::DType::F32)? / 255.)? .broadcast_sub(&mean)? .broadcast_div(&std) } pub fn main() -> anyhow::Result<()> { let args = Args::parse(); let model_file = match args.model { None => { let api = hf_hub::api::sync::Api::new()?; if args.quantized { let api = api.model("lmz/candle-blip".to_string()); api.get("blip-image-captioning-large-q4k.gguf")? } else { let api = api.repo(hf_hub::Repo::with_revision( "Salesforce/blip-image-captioning-large".to_string(), hf_hub::RepoType::Model, "refs/pr/18".to_string(), )); api.get("model.safetensors")? } } Some(model) => model.into(), }; let tokenizer = match args.tokenizer { None => { let api = hf_hub::api::sync::Api::new()?; let api = api.model("Salesforce/blip-image-captioning-large".to_string()); api.get("tokenizer.json")? } Some(file) => file.into(), }; let tokenizer = Tokenizer::from_file(tokenizer).map_err(E::msg)?; let mut tokenizer = TokenOutputStream::new(tokenizer); let mut logits_processor = candle_transformers::generation::LogitsProcessor::new(1337, None, None); let config = blip::Config::image_captioning_large(); let device = candle_examples::device(args.cpu)?; let (image_embeds, device, mut model) = if args.quantized { let device = Device::Cpu; let image = load_image(args.image)?.to_device(&device)?; println!("loaded image {image:?}"); let vb = quantized_blip::VarBuilder::from_gguf(model_file, &device)?; let model = quantized_blip::BlipForConditionalGeneration::new(&config, vb)?; let image_embeds = image.unsqueeze(0)?.apply(model.vision_model())?; (image_embeds, device, Model::Q(model)) } else { let image = load_image(args.image)?.to_device(&device)?; println!("loaded image {image:?}"); let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[model_file], DType::F32, &device)? 
}; let model = blip::BlipForConditionalGeneration::new(&config, vb)?; let image_embeds = image.unsqueeze(0)?.apply(model.vision_model())?; (image_embeds, device, Model::M(model)) }; let mut token_ids = vec![30522u32]; for index in 0..1000 { let context_size = if index > 0 { 1 } else { token_ids.len() }; let start_pos = token_ids.len().saturating_sub(context_size); let input_ids = Tensor::new(&token_ids[start_pos..], &device)?.unsqueeze(0)?; let logits = model.text_decoder_forward(&input_ids, &image_embeds)?; let logits = logits.squeeze(0)?; let logits = logits.get(logits.dim(0)? - 1)?; let token = logits_processor.sample(&logits)?; if token == SEP_TOKEN_ID { break; } token_ids.push(token); if let Some(t) = tokenizer.next_token(token)? { use std::io::Write; print!("{t}"); std::io::stdout().flush()?; } } if let Some(rest) = tokenizer.decode_rest().map_err(E::msg)? { print!("{rest}"); } println!(); Ok(()) }
candle/candle-examples/examples/blip/main.rs/0
{ "file_path": "candle/candle-examples/examples/blip/main.rs", "repo_id": "candle", "token_count": 2436 }
pub enum SeparatorStyle { Two, Mpt, } pub struct Conversation { pub system: String, pub roles: Vec<String>, pub messages: Vec<(String, Option<String>)>, pub offset: i32, pub sep_style: SeparatorStyle, pub sep: String, pub sep2: Option<String>, pub version: String, } impl Conversation { pub fn new( system: &str, roles: &[String], offset: i32, sep_style: SeparatorStyle, sep: &str, sep2: Option<&str>, version: &str, ) -> Self { Conversation { system: system.to_string(), roles: roles.to_vec(), messages: Vec::new(), offset, sep_style, sep: sep.to_string(), sep2: sep2.map(|s| s.to_string()), version: version.to_string(), } } pub fn conv_chatml_direct() -> Self { Conversation::new( "<|im_start|>system\nAnswer the questions.", &[ "<|im_start|>user\n".to_string(), "<|im_start|>assistant\n".to_string(), ], 0, SeparatorStyle::Mpt, "<|im_end|>", None, "mpt", ) } pub fn conv_llava_v1() -> Self { Conversation::new( "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.", &[ "USER".to_string(), "ASSISTANT".to_string(), ], 0, SeparatorStyle::Two, " ", Some("</s>"), "v1" ) } pub fn append_message(&mut self, role: String, message: Option<&str>) { self.messages.push((role, message.map(|s| s.to_string()))) } pub fn append_user_message(&mut self, message: Option<&str>) { self.append_message(self.roles[0].clone(), message); } pub fn append_assistant_message(&mut self, message: Option<&str>) { self.append_message(self.roles[1].clone(), message); } pub fn get_prompt(&self) -> String { match self.sep_style { SeparatorStyle::Mpt => { let mut ret = String::new(); ret.push_str(&self.system); ret.push_str(&self.sep); for (role, message) in &self.messages { ret.push_str(role); if let Some(message) = message { ret.push_str(message); }; ret.push_str(&self.sep); } ret } SeparatorStyle::Two => { let seps = [self.sep.clone(), self.sep2.clone().unwrap()]; let mut ret = String::new(); ret.push_str(&self.system); ret.push_str(&seps[0]); for (i, (role, message)) in self.messages.iter().enumerate() { ret.push_str(role); if let Some(message) = message { ret.push_str(": "); // strictly follow the python implementation, otherwise it will cause some minor difference between tokens ^_^ ret.push_str(message); ret.push_str(&seps[i % 2]); } else { ret.push(':') } } ret } } } }
candle/candle-examples/examples/llava/conversation.rs/0
{ "file_path": "candle/candle-examples/examples/llava/conversation.rs", "repo_id": "candle", "token_count": 1910 }
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use anyhow::Result; use candle::{DType, IndexOp, Tensor}; use candle_nn::VarBuilder; use candle_transformers::models::mimi::{Config, Model}; use clap::{Parser, ValueEnum}; use hf_hub::api::sync::Api; mod audio_io; #[derive(Clone, Debug, Copy, PartialEq, Eq, ValueEnum)] enum Action { AudioToAudio, AudioToCode, CodeToAudio, } #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] struct Args { /// The action to be performed, specifies the format for the input and output data. action: Action, /// The input file, either an audio file or some mimi tokens stored as safetensors. in_file: String, /// The output file, either a wave audio file or some mimi tokens stored as safetensors. out_file: String, /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, /// The model weight file, in safetensor format. #[arg(long)] model: Option<String>, /// Whether to use streaming or not, when streaming slices of data of the given size are passed /// to the encoder/decoder one at a time. #[arg(long)] streaming: Option<usize>, } fn main() -> Result<()> { let args = Args::parse(); let device = candle_examples::device(args.cpu)?; let model = match args.model { Some(model) => std::path::PathBuf::from(model), None => Api::new()? .model("kyutai/mimi".to_string()) .get("model.safetensors")?, }; let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[model], DType::F32, &device)? }; let config = Config::v0_1(None); let mut model = Model::new(config, vb)?; let codes = match args.action { Action::CodeToAudio => { let codes = candle::safetensors::load(args.in_file, &device)?; codes.get("codes").expect("no codes in input file").clone() } Action::AudioToCode | Action::AudioToAudio => { let pcm = if args.in_file == "-" { println!(">>>> RECORDING AUDIO, PRESS ENTER ONCE DONE <<<<"); let (stream, input_audio) = audio_io::setup_input_stream()?; let mut pcms = vec![]; let stdin = std::thread::spawn(|| { let mut s = String::new(); std::io::stdin().read_line(&mut s) }); while !stdin.is_finished() { let input = input_audio.lock().unwrap().take_all(); if input.is_empty() { std::thread::sleep(std::time::Duration::from_millis(100)); continue; } pcms.push(input) } drop(stream); pcms.concat() } else { let (pcm, sample_rate) = audio_io::pcm_decode(args.in_file)?; if sample_rate != 24_000 { println!("WARNING: mimi uses a 24khz sample rate, input uses {sample_rate}, resampling..."); audio_io::resample(&pcm, sample_rate as usize, 24_000)? } else { pcm } }; match args.streaming { Some(chunk_size) => { let mut code_chunks = vec![]; for pcm in pcm.chunks(chunk_size) { let pcm = Tensor::new(pcm, &device)?.reshape((1, 1, ()))?; let code_chunk = model.encode(&pcm)?; code_chunks.push(code_chunk) } Tensor::cat(&code_chunks, candle::D::Minus1)? } None => { let pcm_len = pcm.len(); let pcm = Tensor::from_vec(pcm, (1, 1, pcm_len), &device)?; println!("input pcm shape: {:?}", pcm.shape()); model.encode(&pcm)? 
} } } }; println!("codes shape: {:?}", codes.shape()); model.reset_state(); match args.action { Action::AudioToCode => { codes.save_safetensors("codes", &args.out_file)?; } Action::AudioToAudio | Action::CodeToAudio => { let pcm = match args.streaming { Some(chunk_size) => { let seq_len = codes.dim(candle::D::Minus1)?; let mut pcm_chunks = vec![]; for chunk_start in (0..seq_len).step_by(chunk_size) { let chunk_len = usize::min(chunk_size, seq_len - chunk_start); let codes = codes.narrow(candle::D::Minus1, chunk_start, chunk_len)?; let pcm = model.decode_step(&codes.into())?; if let Some(pcm) = pcm.as_option() { pcm_chunks.push(pcm.clone()) } } Tensor::cat(&pcm_chunks, candle::D::Minus1)? } None => model.decode(&codes)?, }; println!("output pcm shape: {:?}", pcm.shape()); let pcm = pcm.i(0)?.i(0)?; let pcm = candle_examples::audio::normalize_loudness(&pcm, 24_000, true)?; let pcm = pcm.to_vec1::<f32>()?; if args.out_file == "-" { let (stream, ad) = audio_io::setup_output_stream()?; { let mut ad = ad.lock().unwrap(); ad.push_samples(&pcm)?; } loop { let ad = ad.lock().unwrap(); if ad.is_empty() { break; } // That's very weird, calling thread::sleep here triggers the stream to stop // playing (the callback doesn't seem to be called anymore). // std::thread::sleep(std::time::Duration::from_millis(100)); } drop(stream) } else { let mut output = std::fs::File::create(&args.out_file)?; candle_examples::wav::write_pcm_as_wav(&mut output, &pcm, 24_000)?; } } } Ok(()) }
candle/candle-examples/examples/mimi/main.rs/0
{ "file_path": "candle/candle-examples/examples/mimi/main.rs", "repo_id": "candle", "token_count": 3353 }
#![allow(dead_code)] // https://huggingface.co/facebook/musicgen-small/tree/main // https://github.com/huggingface/transformers/blob/cd4584e3c809bb9e1392ccd3fe38b40daba5519a/src/transformers/models/musicgen/modeling_musicgen.py // TODO: Add an offline mode. // TODO: Add a KV cache. #[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; mod musicgen_model; use musicgen_model::{GenConfig, MusicgenForConditionalGeneration}; use anyhow::{Error as E, Result}; use candle::{DType, Tensor}; use candle_nn::VarBuilder; use clap::Parser; use hf_hub::{api::sync::Api, Repo, RepoType}; const DTYPE: DType = DType::F32; #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] struct Args { /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, /// The model weight file, in safetensor format. #[arg(long)] model: Option<String>, /// The tokenizer config. #[arg(long)] tokenizer: Option<String>, #[arg( long, default_value = "90s rock song with loud guitars and heavy drums" )] prompt: String, } fn main() -> Result<()> { use tokenizers::Tokenizer; let args = Args::parse(); let device = candle_examples::device(args.cpu)?; let tokenizer = match args.tokenizer { Some(tokenizer) => std::path::PathBuf::from(tokenizer), None => Api::new()? .model("facebook/musicgen-small".to_string()) .get("tokenizer.json")?, }; let mut tokenizer = Tokenizer::from_file(tokenizer).map_err(E::msg)?; let tokenizer = tokenizer .with_padding(None) .with_truncation(None) .map_err(E::msg)?; let model = match args.model { Some(model) => std::path::PathBuf::from(model), None => Api::new()? .repo(Repo::with_revision( "facebook/musicgen-small".to_string(), RepoType::Model, "refs/pr/13".to_string(), )) .get("model.safetensors")?, }; let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[model], DTYPE, &device)? }; let config = GenConfig::small(); let mut model = MusicgenForConditionalGeneration::load(vb, config)?; let tokens = tokenizer .encode(args.prompt.as_str(), true) .map_err(E::msg)? .get_ids() .to_vec(); println!("tokens: {tokens:?}"); let tokens = Tensor::new(tokens.as_slice(), &device)?.unsqueeze(0)?; println!("{tokens:?}"); let embeds = model.text_encoder.forward(&tokens)?; println!("{embeds}"); Ok(()) }
candle/candle-examples/examples/musicgen/main.rs/0
{ "file_path": "candle/candle-examples/examples/musicgen/main.rs", "repo_id": "candle", "token_count": 1151 }
use std::collections::VecDeque; use candle::{DType, Device, Error, Module, Result, Tensor, Var}; use candle_nn::{ func, linear, sequential::seq, Activation, AdamW, Optimizer, ParamsAdamW, Sequential, VarBuilder, VarMap, }; use rand::{distributions::Uniform, thread_rng, Rng}; use super::gym_env::GymEnv; pub struct OuNoise { mu: f64, theta: f64, sigma: f64, state: Tensor, } impl OuNoise { pub fn new(mu: f64, theta: f64, sigma: f64, size_action: usize) -> Result<Self> { Ok(Self { mu, theta, sigma, state: Tensor::ones(size_action, DType::F32, &Device::Cpu)?, }) } pub fn sample(&mut self) -> Result<Tensor> { let rand = Tensor::randn_like(&self.state, 0.0, 1.0)?; let dx = ((self.theta * (self.mu - &self.state)?)? + (self.sigma * rand)?)?; self.state = (&self.state + dx)?; Ok(self.state.clone()) } } #[derive(Clone)] struct Transition { state: Tensor, action: Tensor, reward: Tensor, next_state: Tensor, terminated: bool, truncated: bool, } impl Transition { fn new( state: &Tensor, action: &Tensor, reward: &Tensor, next_state: &Tensor, terminated: bool, truncated: bool, ) -> Self { Self { state: state.clone(), action: action.clone(), reward: reward.clone(), next_state: next_state.clone(), terminated, truncated, } } } pub struct ReplayBuffer { buffer: VecDeque<Transition>, capacity: usize, size: usize, } impl ReplayBuffer { pub fn new(capacity: usize) -> Self { Self { buffer: VecDeque::with_capacity(capacity), capacity, size: 0, } } pub fn push( &mut self, state: &Tensor, action: &Tensor, reward: &Tensor, next_state: &Tensor, terminated: bool, truncated: bool, ) { if self.size == self.capacity { self.buffer.pop_front(); } else { self.size += 1; } self.buffer.push_back(Transition::new( state, action, reward, next_state, terminated, truncated, )); } #[allow(clippy::type_complexity)] pub fn random_batch( &self, batch_size: usize, ) -> Result<Option<(Tensor, Tensor, Tensor, Tensor, Vec<bool>, Vec<bool>)>> { if self.size < batch_size { Ok(None) } else { let transitions: Vec<&Transition> = thread_rng() .sample_iter(Uniform::from(0..self.size)) .take(batch_size) .map(|i| self.buffer.get(i).unwrap()) .collect(); let states: Vec<Tensor> = transitions .iter() .map(|t| t.state.unsqueeze(0)) .collect::<Result<_>>()?; let actions: Vec<Tensor> = transitions .iter() .map(|t| t.action.unsqueeze(0)) .collect::<Result<_>>()?; let rewards: Vec<Tensor> = transitions .iter() .map(|t| t.reward.unsqueeze(0)) .collect::<Result<_>>()?; let next_states: Vec<Tensor> = transitions .iter() .map(|t| t.next_state.unsqueeze(0)) .collect::<Result<_>>()?; let terminateds: Vec<bool> = transitions.iter().map(|t| t.terminated).collect(); let truncateds: Vec<bool> = transitions.iter().map(|t| t.truncated).collect(); Ok(Some(( Tensor::cat(&states, 0)?, Tensor::cat(&actions, 0)?, Tensor::cat(&rewards, 0)?, Tensor::cat(&next_states, 0)?, terminateds, truncateds, ))) } } } fn track( varmap: &mut VarMap, vb: &VarBuilder, target_prefix: &str, network_prefix: &str, dims: &[(usize, usize)], tau: f64, ) -> Result<()> { for (i, &(in_dim, out_dim)) in dims.iter().enumerate() { let target_w = vb.get((out_dim, in_dim), &format!("{target_prefix}-fc{i}.weight"))?; let network_w = vb.get((out_dim, in_dim), &format!("{network_prefix}-fc{i}.weight"))?; varmap.set_one( format!("{target_prefix}-fc{i}.weight"), ((tau * network_w)? 
+ ((1.0 - tau) * target_w)?)?, )?; let target_b = vb.get(out_dim, &format!("{target_prefix}-fc{i}.bias"))?; let network_b = vb.get(out_dim, &format!("{network_prefix}-fc{i}.bias"))?; varmap.set_one( format!("{target_prefix}-fc{i}.bias"), ((tau * network_b)? + ((1.0 - tau) * target_b)?)?, )?; } Ok(()) } #[allow(unused)] struct Actor<'a> { varmap: VarMap, vb: VarBuilder<'a>, network: Sequential, target_network: Sequential, size_state: usize, size_action: usize, dims: Vec<(usize, usize)>, } impl Actor<'_> { fn new(device: &Device, dtype: DType, size_state: usize, size_action: usize) -> Result<Self> { let mut varmap = VarMap::new(); let vb = VarBuilder::from_varmap(&varmap, dtype, device); let dims = vec![(size_state, 400), (400, 300), (300, size_action)]; let make_network = |prefix: &str| { let seq = seq() .add(linear( dims[0].0, dims[0].1, vb.pp(format!("{prefix}-fc0")), )?) .add(Activation::Relu) .add(linear( dims[1].0, dims[1].1, vb.pp(format!("{prefix}-fc1")), )?) .add(Activation::Relu) .add(linear( dims[2].0, dims[2].1, vb.pp(format!("{prefix}-fc2")), )?) .add(func(|xs| xs.tanh())); Ok::<Sequential, Error>(seq) }; let network = make_network("actor")?; let target_network = make_network("target-actor")?; // this sets the two networks to be equal to each other using tau = 1.0 track(&mut varmap, &vb, "target-actor", "actor", &dims, 1.0)?; Ok(Self { varmap, vb, network, target_network, size_state, size_action, dims, }) } fn forward(&self, state: &Tensor) -> Result<Tensor> { self.network.forward(state) } fn target_forward(&self, state: &Tensor) -> Result<Tensor> { self.target_network.forward(state) } fn track(&mut self, tau: f64) -> Result<()> { track( &mut self.varmap, &self.vb, "target-actor", "actor", &self.dims, tau, ) } } #[allow(unused)] struct Critic<'a> { varmap: VarMap, vb: VarBuilder<'a>, network: Sequential, target_network: Sequential, size_state: usize, size_action: usize, dims: Vec<(usize, usize)>, } impl Critic<'_> { fn new(device: &Device, dtype: DType, size_state: usize, size_action: usize) -> Result<Self> { let mut varmap = VarMap::new(); let vb = VarBuilder::from_varmap(&varmap, dtype, device); let dims: Vec<(usize, usize)> = vec![(size_state + size_action, 400), (400, 300), (300, 1)]; let make_network = |prefix: &str| { let seq = seq() .add(linear( dims[0].0, dims[0].1, vb.pp(format!("{prefix}-fc0")), )?) .add(Activation::Relu) .add(linear( dims[1].0, dims[1].1, vb.pp(format!("{prefix}-fc1")), )?) 
.add(Activation::Relu) .add(linear( dims[2].0, dims[2].1, vb.pp(format!("{prefix}-fc2")), )?); Ok::<Sequential, Error>(seq) }; let network = make_network("critic")?; let target_network = make_network("target-critic")?; // this sets the two networks to be equal to each other using tau = 1.0 track(&mut varmap, &vb, "target-critic", "critic", &dims, 1.0)?; Ok(Self { varmap, vb, network, target_network, size_state, size_action, dims, }) } fn forward(&self, state: &Tensor, action: &Tensor) -> Result<Tensor> { let xs = Tensor::cat(&[action, state], 1)?; self.network.forward(&xs) } fn target_forward(&self, state: &Tensor, action: &Tensor) -> Result<Tensor> { let xs = Tensor::cat(&[action, state], 1)?; self.target_network.forward(&xs) } fn track(&mut self, tau: f64) -> Result<()> { track( &mut self.varmap, &self.vb, "target-critic", "critic", &self.dims, tau, ) } } #[allow(unused)] #[allow(clippy::upper_case_acronyms)] pub struct DDPG<'a> { actor: Actor<'a>, actor_optim: AdamW, critic: Critic<'a>, critic_optim: AdamW, gamma: f64, tau: f64, replay_buffer: ReplayBuffer, ou_noise: OuNoise, size_state: usize, size_action: usize, pub train: bool, } impl DDPG<'_> { #[allow(clippy::too_many_arguments)] pub fn new( device: &Device, size_state: usize, size_action: usize, train: bool, actor_lr: f64, critic_lr: f64, gamma: f64, tau: f64, buffer_capacity: usize, ou_noise: OuNoise, ) -> Result<Self> { let filter_by_prefix = |varmap: &VarMap, prefix: &str| { varmap .data() .lock() .unwrap() .iter() .filter_map(|(name, var)| name.starts_with(prefix).then_some(var.clone())) .collect::<Vec<Var>>() }; let actor = Actor::new(device, DType::F32, size_state, size_action)?; let actor_optim = AdamW::new( filter_by_prefix(&actor.varmap, "actor"), ParamsAdamW { lr: actor_lr, ..Default::default() }, )?; let critic = Critic::new(device, DType::F32, size_state, size_action)?; let critic_optim = AdamW::new( filter_by_prefix(&critic.varmap, "critic"), ParamsAdamW { lr: critic_lr, ..Default::default() }, )?; Ok(Self { actor, actor_optim, critic, critic_optim, gamma, tau, replay_buffer: ReplayBuffer::new(buffer_capacity), ou_noise, size_state, size_action, train, }) } pub fn remember( &mut self, state: &Tensor, action: &Tensor, reward: &Tensor, next_state: &Tensor, terminated: bool, truncated: bool, ) { self.replay_buffer .push(state, action, reward, next_state, terminated, truncated) } pub fn actions(&mut self, state: &Tensor) -> Result<f32> { let actions = self .actor .forward(&state.detach().unsqueeze(0)?)? .squeeze(0)?; let actions = if self.train { (actions + self.ou_noise.sample()?)? } else { actions }; actions.squeeze(0)?.to_scalar::<f32>() } pub fn train(&mut self, batch_size: usize) -> Result<()> { let (states, actions, rewards, next_states, _, _) = match self.replay_buffer.random_batch(batch_size)? { Some(v) => v, _ => return Ok(()), }; let q_target = self .critic .target_forward(&next_states, &self.actor.target_forward(&next_states)?)?; let q_target = (rewards + (self.gamma * q_target)?.detach())?; let q = self.critic.forward(&states, &actions)?; let diff = (q_target - q)?; let critic_loss = diff.sqr()?.mean_all()?; self.critic_optim.backward_step(&critic_loss)?; let actor_loss = self .critic .forward(&states, &self.actor.forward(&states)?)? .mean_all()? .neg()?; self.actor_optim.backward_step(&actor_loss)?; self.critic.track(self.tau)?; self.actor.track(self.tau)?; Ok(()) } } // The impact of the q value of the next state on the current state's q value. 
const GAMMA: f64 = 0.99; // The weight for updating the target networks. const TAU: f64 = 0.005; // The capacity of the replay buffer used for sampling training data. const REPLAY_BUFFER_CAPACITY: usize = 100_000; // The training batch size for each training iteration. const TRAINING_BATCH_SIZE: usize = 100; // The total number of episodes. const MAX_EPISODES: usize = 100; // The maximum length of an episode. const EPISODE_LENGTH: usize = 200; // The number of training iterations after one episode finishes. const TRAINING_ITERATIONS: usize = 200; // Ornstein-Uhlenbeck process parameters. const MU: f64 = 0.0; const THETA: f64 = 0.15; const SIGMA: f64 = 0.1; const ACTOR_LEARNING_RATE: f64 = 1e-4; const CRITIC_LEARNING_RATE: f64 = 1e-3; pub fn run() -> Result<()> { let env = GymEnv::new("Pendulum-v1")?; println!("action space: {}", env.action_space()); println!("observation space: {:?}", env.observation_space()); let size_state = env.observation_space().iter().product::<usize>(); let size_action = env.action_space(); let mut agent = DDPG::new( &Device::Cpu, size_state, size_action, true, ACTOR_LEARNING_RATE, CRITIC_LEARNING_RATE, GAMMA, TAU, REPLAY_BUFFER_CAPACITY, OuNoise::new(MU, THETA, SIGMA, size_action)?, )?; let mut rng = rand::thread_rng(); for episode in 0..MAX_EPISODES { // let mut state = env.reset(episode as u64)?; let mut state = env.reset(rng.gen::<u64>())?; let mut total_reward = 0.0; for _ in 0..EPISODE_LENGTH { let mut action = 2.0 * agent.actions(&state)?; action = action.clamp(-2.0, 2.0); let step = env.step(vec![action])?; total_reward += step.reward; agent.remember( &state, &Tensor::new(vec![action], &Device::Cpu)?, &Tensor::new(vec![step.reward as f32], &Device::Cpu)?, &step.state, step.terminated, step.truncated, ); if step.terminated || step.truncated { break; } state = step.state; } println!("episode {episode} with total reward of {total_reward}"); for _ in 0..TRAINING_ITERATIONS { agent.train(TRAINING_BATCH_SIZE)?; } } println!("Testing..."); agent.train = false; for episode in 0..10 { // let mut state = env.reset(episode as u64)?; let mut state = env.reset(rng.gen::<u64>())?; let mut total_reward = 0.0; for _ in 0..EPISODE_LENGTH { let mut action = 2.0 * agent.actions(&state)?; action = action.clamp(-2.0, 2.0); let step = env.step(vec![action])?; total_reward += step.reward; if step.terminated || step.truncated { break; } state = step.state; } println!("episode {episode} with total reward of {total_reward}"); } Ok(()) }
candle/candle-examples/examples/reinforcement-learning/ddpg.rs/0
{ "file_path": "candle/candle-examples/examples/reinforcement-learning/ddpg.rs", "repo_id": "candle", "token_count": 8541 }
[ { "index": 1, "color": "#787878", "label": "wall" }, { "index": 2, "color": "#B47878", "label": "building;edifice" }, { "index": 3, "color": "#06E6E6", "label": "sky" }, { "index": 4, "color": "#503232", "label": "floor;flooring" }, { "index": 5, "color": "#04C803", "label": "tree" }, { "index": 6, "color": "#787850", "label": "ceiling" }, { "index": 7, "color": "#8C8C8C", "label": "road;route" }, { "index": 8, "color": "#CC05FF", "label": "bed" }, { "index": 9, "color": "#E6E6E6", "label": "windowpane;window" }, { "index": 10, "color": "#04FA07", "label": "grass" }, { "index": 11, "color": "#E005FF", "label": "cabinet" }, { "index": 12, "color": "#EBFF07", "label": "sidewalk;pavement" }, { "index": 13, "color": "#96053D", "label": "person;individual;someone;somebody;mortal;soul" }, { "index": 14, "color": "#787846", "label": "earth;ground" }, { "index": 15, "color": "#08FF33", "label": "door;double;door" }, { "index": 16, "color": "#FF0652", "label": "table" }, { "index": 17, "color": "#8FFF8C", "label": "mountain;mount" }, { "index": 18, "color": "#CCFF04", "label": "plant;flora;plant;life" }, { "index": 19, "color": "#FF3307", "label": "curtain;drape;drapery;mantle;pall" }, { "index": 20, "color": "#CC4603", "label": "chair" }, { "index": 21, "color": "#0066C8", "label": "car;auto;automobile;machine;motorcar" }, { "index": 22, "color": "#3DE6FA", "label": "water" }, { "index": 23, "color": "#FF0633", "label": "painting;picture" }, { "index": 24, "color": "#0B66FF", "label": "sofa;couch;lounge" }, { "index": 25, "color": "#FF0747", "label": "shelf" }, { "index": 26, "color": "#FF09E0", "label": "house" }, { "index": 27, "color": "#0907E6", "label": "sea" }, { "index": 28, "color": "#DCDCDC", "label": "mirror" }, { "index": 29, "color": "#FF095C", "label": "rug;carpet;carpeting" }, { "index": 30, "color": "#7009FF", "label": "field" }, { "index": 31, "color": "#08FFD6", "label": "armchair" }, { "index": 32, "color": "#07FFE0", "label": "seat" }, { "index": 33, "color": "#FFB806", "label": "fence;fencing" }, { "index": 34, "color": "#0AFF47", "label": "desk" }, { "index": 35, "color": "#FF290A", "label": "rock;stone" }, { "index": 36, "color": "#07FFFF", "label": "wardrobe;closet;press" }, { "index": 37, "color": "#E0FF08", "label": "lamp" }, { "index": 38, "color": "#6608FF", "label": "bathtub;bathing;tub;bath;tub" }, { "index": 39, "color": "#FF3D06", "label": "railing;rail" }, { "index": 40, "color": "#FFC207", "label": "cushion" }, { "index": 41, "color": "#FF7A08", "label": "base;pedestal;stand" }, { "index": 42, "color": "#00FF14", "label": "box" }, { "index": 43, "color": "#FF0829", "label": "column;pillar" }, { "index": 44, "color": "#FF0599", "label": "signboard;sign" }, { "index": 45, "color": "#0633FF", "label": "chest;of;drawers;chest;bureau;dresser" }, { "index": 46, "color": "#EB0CFF", "label": "counter" }, { "index": 47, "color": "#A09614", "label": "sand" }, { "index": 48, "color": "#00A3FF", "label": "sink" }, { "index": 49, "color": "#8C8C8C", "label": "skyscraper" }, { "index": 50, "color": "#FA0A0F", "label": "fireplace;hearth;open;fireplace" }, { "index": 51, "color": "#14FF00", "label": "refrigerator;icebox" }, { "index": 52, "color": "#1FFF00", "label": "grandstand;covered;stand" }, { "index": 53, "color": "#FF1F00", "label": "path" }, { "index": 54, "color": "#FFE000", "label": "stairs;steps" }, { "index": 55, "color": "#99FF00", "label": "runway" }, { "index": 56, "color": "#0000FF", "label": "case;display;case;showcase;vitrine" }, { "index": 57, "color": 
"#FF4700", "label": "pool;table;billiard;table;snooker;table" }, { "index": 58, "color": "#00EBFF", "label": "pillow" }, { "index": 59, "color": "#00ADFF", "label": "screen;door;screen" }, { "index": 60, "color": "#1F00FF", "label": "stairway;staircase" }, { "index": 61, "color": "#0BC8C8", "label": "river" }, { "index": 62, "color": "#FF5200", "label": "bridge;span" }, { "index": 63, "color": "#00FFF5", "label": "bookcase" }, { "index": 64, "color": "#003DFF", "label": "blind;screen" }, { "index": 65, "color": "#00FF70", "label": "coffee;table;cocktail;table" }, { "index": 66, "color": "#00FF85", "label": "toilet;can;commode;crapper;pot;potty;stool;throne" }, { "index": 67, "color": "#FF0000", "label": "flower" }, { "index": 68, "color": "#FFA300", "label": "book" }, { "index": 69, "color": "#FF6600", "label": "hill" }, { "index": 70, "color": "#C2FF00", "label": "bench" }, { "index": 71, "color": "#008FFF", "label": "countertop" }, { "index": 72, "color": "#33FF00", "label": "stove;kitchen;stove;range;kitchen;range;cooking;stove" }, { "index": 73, "color": "#0052FF", "label": "palm;palm;tree" }, { "index": 74, "color": "#00FF29", "label": "kitchen;island" }, { "index": 75, "color": "#00FFAD", "label": "computer;computing;machine;computing;device;data;processor;electronic;computer;information;processing;system" }, { "index": 76, "color": "#0A00FF", "label": "swivel;chair" }, { "index": 77, "color": "#ADFF00", "label": "boat" }, { "index": 78, "color": "#00FF99", "label": "bar" }, { "index": 79, "color": "#FF5C00", "label": "arcade;machine" }, { "index": 80, "color": "#FF00FF", "label": "hovel;hut;hutch;shack;shanty" }, { "index": 81, "color": "#FF00F5", "label": "bus;autobus;coach;charabanc;double-decker;jitney;motorbus;motorcoach;omnibus;passenger;vehicle" }, { "index": 82, "color": "#FF0066", "label": "towel" }, { "index": 83, "color": "#FFAD00", "label": "light;light;source" }, { "index": 84, "color": "#FF0014", "label": "truck;motortruck" }, { "index": 85, "color": "#FFB8B8", "label": "tower" }, { "index": 86, "color": "#001FFF", "label": "chandelier;pendant;pendent" }, { "index": 87, "color": "#00FF3D", "label": "awning;sunshade;sunblind" }, { "index": 88, "color": "#0047FF", "label": "streetlight;street;lamp" }, { "index": 89, "color": "#FF00CC", "label": "booth;cubicle;stall;kiosk" }, { "index": 90, "color": "#00FFC2", "label": "television;television;receiver;television;set;tv;tv;set;idiot;box;boob;tube;telly;goggle;box" }, { "index": 91, "color": "#00FF52", "label": "airplane;aeroplane;plane" }, { "index": 92, "color": "#000AFF", "label": "dirt;track" }, { "index": 93, "color": "#0070FF", "label": "apparel;wearing;apparel;dress;clothes" }, { "index": 94, "color": "#3300FF", "label": "pole" }, { "index": 95, "color": "#00C2FF", "label": "land;ground;soil" }, { "index": 96, "color": "#007AFF", "label": "bannister;banister;balustrade;balusters;handrail" }, { "index": 97, "color": "#00FFA3", "label": "escalator;moving;staircase;moving;stairway" }, { "index": 98, "color": "#FF9900", "label": "ottoman;pouf;pouffe;puff;hassock" }, { "index": 99, "color": "#00FF0A", "label": "bottle" }, { "index": 100, "color": "#FF7000", "label": "buffet;counter;sideboard" }, { "index": 101, "color": "#8FFF00", "label": "poster;posting;placard;notice;bill;card" }, { "index": 102, "color": "#5200FF", "label": "stage" }, { "index": 103, "color": "#A3FF00", "label": "van" }, { "index": 104, "color": "#FFEB00", "label": "ship" }, { "index": 105, "color": "#08B8AA", "label": "fountain" }, { "index": 106, 
"color": "#8500FF", "label": "conveyer;belt;conveyor;belt;conveyer;conveyor;transporter" }, { "index": 107, "color": "#00FF5C", "label": "canopy" }, { "index": 108, "color": "#B800FF", "label": "washer;automatic;washer;washing;machine" }, { "index": 109, "color": "#FF001F", "label": "plaything;toy" }, { "index": 110, "color": "#00B8FF", "label": "swimming;pool;swimming;bath;natatorium" }, { "index": 111, "color": "#00D6FF", "label": "stool" }, { "index": 112, "color": "#FF0070", "label": "barrel;cask" }, { "index": 113, "color": "#5CFF00", "label": "basket;handbasket" }, { "index": 114, "color": "#00E0FF", "label": "waterfall;falls" }, { "index": 115, "color": "#70E0FF", "label": "tent;collapsible;shelter" }, { "index": 116, "color": "#46B8A0", "label": "bag" }, { "index": 117, "color": "#A300FF", "label": "minibike;motorbike" }, { "index": 118, "color": "#9900FF", "label": "cradle" }, { "index": 119, "color": "#47FF00", "label": "oven" }, { "index": 120, "color": "#FF00A3", "label": "ball" }, { "index": 121, "color": "#FFCC00", "label": "food;solid;food" }, { "index": 122, "color": "#FF008F", "label": "step;stair" }, { "index": 123, "color": "#00FFEB", "label": "tank;storage;tank" }, { "index": 124, "color": "#85FF00", "label": "trade;name;brand;name;brand;marque" }, { "index": 125, "color": "#FF00EB", "label": "microwave;microwave;oven" }, { "index": 126, "color": "#F500FF", "label": "pot;flowerpot" }, { "index": 127, "color": "#FF007A", "label": "animal;animate;being;beast;brute;creature;fauna" }, { "index": 128, "color": "#FFF500", "label": "bicycle;bike;wheel;cycle" }, { "index": 129, "color": "#0ABED4", "label": "lake" }, { "index": 130, "color": "#D6FF00", "label": "dishwasher;dish;washer;dishwashing;machine" }, { "index": 131, "color": "#00CCFF", "label": "screen;silver;screen;projection;screen" }, { "index": 132, "color": "#1400FF", "label": "blanket;cover" }, { "index": 133, "color": "#FFFF00", "label": "sculpture" }, { "index": 134, "color": "#0099FF", "label": "hood;exhaust;hood" }, { "index": 135, "color": "#0029FF", "label": "sconce" }, { "index": 136, "color": "#00FFCC", "label": "vase" }, { "index": 137, "color": "#2900FF", "label": "traffic;light;traffic;signal;stoplight" }, { "index": 138, "color": "#29FF00", "label": "tray" }, { "index": 139, "color": "#AD00FF", "label": "ashcan;trash;can;garbage;can;wastebin;ash;bin;ash-bin;ashbin;dustbin;trash;barrel;trash;bin" }, { "index": 140, "color": "#00F5FF", "label": "fan" }, { "index": 141, "color": "#4700FF", "label": "pier;wharf;wharfage;dock" }, { "index": 142, "color": "#7A00FF", "label": "crt;screen" }, { "index": 143, "color": "#00FFB8", "label": "plate" }, { "index": 144, "color": "#005CFF", "label": "monitor;monitoring;device" }, { "index": 145, "color": "#B8FF00", "label": "bulletin;board;notice;board" }, { "index": 146, "color": "#0085FF", "label": "shower" }, { "index": 147, "color": "#FFD600", "label": "radiator" }, { "index": 148, "color": "#19C2C2", "label": "glass;drinking;glass" }, { "index": 149, "color": "#66FF00", "label": "clock" }, { "index": 150, "color": "#5C00FF", "label": "flag" } ]
candle/candle-examples/examples/segformer/assets/labels.json/0
{ "file_path": "candle/candle-examples/examples/segformer/assets/labels.json", "repo_id": "candle", "token_count": 6397 }
mod clip;
mod sampling;
mod vae;

use candle::{DType, IndexOp, Tensor};
use candle_transformers::models::mmdit::model::{Config as MMDiTConfig, MMDiT};

use crate::clip::StableDiffusion3TripleClipWithTokenizer;
use crate::vae::{build_sd3_vae_autoencoder, sd3_vae_vb_rename};

use anyhow::{Ok, Result};
use clap::Parser;

#[derive(Clone, Debug, Copy, PartialEq, Eq, clap::ValueEnum)]
enum Which {
    #[value(name = "3-medium")]
    V3Medium,
    #[value(name = "3.5-large")]
    V3_5Large,
    #[value(name = "3.5-large-turbo")]
    V3_5LargeTurbo,
    #[value(name = "3.5-medium")]
    V3_5Medium,
}

impl Which {
    fn is_3_5(&self) -> bool {
        match self {
            Self::V3Medium => false,
            Self::V3_5Large | Self::V3_5LargeTurbo | Self::V3_5Medium => true,
        }
    }
}

#[derive(Parser)]
#[command(author, version, about, long_about = None)]
struct Args {
    /// The prompt to be used for image generation.
    #[arg(
        long,
        default_value = "A cute rusty robot holding a candle torch in its hand, \
        with glowing neon text \"LETS GO RUSTY\" displayed on its chest, \
        bright background, high quality, 4k"
    )]
    prompt: String,

    #[arg(long, default_value = "")]
    uncond_prompt: String,

    /// Run on CPU rather than on GPU.
    #[arg(long)]
    cpu: bool,

    /// Enable tracing (generates a trace-timestamp.json file).
    #[arg(long)]
    tracing: bool,

    /// Use flash_attn to accelerate attention operation in the MMDiT.
    #[arg(long)]
    use_flash_attn: bool,

    /// The height in pixels of the generated image.
    #[arg(long, default_value_t = 1024)]
    height: usize,

    /// The width in pixels of the generated image.
    #[arg(long, default_value_t = 1024)]
    width: usize,

    /// The model to use.
    #[arg(long, default_value = "3-medium")]
    which: Which,

    /// The number of inference steps to run.
    #[arg(long)]
    num_inference_steps: Option<usize>,

    /// CFG scale.
    #[arg(long)]
    cfg_scale: Option<f64>,

    /// Time shift factor (alpha).
    #[arg(long, default_value_t = 3.0)]
    time_shift: f64,

    /// Use Skip Layer Guidance (SLG) for the sampling.
    /// Currently only supports Stable Diffusion 3.5 Medium.
    #[arg(long)]
    use_slg: bool,

    /// The seed to use when generating random samples.
    #[arg(long)]
    seed: Option<u64>,
}

fn main() -> Result<()> {
    use tracing_chrome::ChromeLayerBuilder;
    use tracing_subscriber::prelude::*;

    let Args {
        prompt,
        uncond_prompt,
        cpu,
        tracing,
        use_flash_attn,
        height,
        width,
        num_inference_steps,
        cfg_scale,
        time_shift,
        seed,
        which,
        use_slg,
    } = Args::parse();

    let _guard = if tracing {
        let (chrome_layer, guard) = ChromeLayerBuilder::new().build();
        tracing_subscriber::registry().with(chrome_layer).init();
        Some(guard)
    } else {
        None
    };

    let device = candle_examples::device(cpu)?;
    let default_inference_steps = match which {
        Which::V3_5Large => 28,
        Which::V3_5LargeTurbo => 4,
        Which::V3_5Medium => 28,
        Which::V3Medium => 28,
    };
    let num_inference_steps = num_inference_steps.unwrap_or(default_inference_steps);
    let default_cfg_scale = match which {
        Which::V3_5Large => 4.0,
        Which::V3_5LargeTurbo => 1.0,
        Which::V3_5Medium => 4.0,
        Which::V3Medium => 4.0,
    };
    let cfg_scale = cfg_scale.unwrap_or(default_cfg_scale);

    let api = hf_hub::api::sync::Api::new()?;
    let (mmdit_config, mut triple, vb) = if which.is_3_5() {
        let sai_repo_for_text_encoders = {
            let name = match which {
                Which::V3_5Large => "stabilityai/stable-diffusion-3.5-large",
                Which::V3_5LargeTurbo => "stabilityai/stable-diffusion-3.5-large-turbo",

                // Unfortunately, stabilityai/stable-diffusion-3.5-medium doesn't have the monolithic text encoders that are usually
                // placed under the text_encoders directory, as is the case in stabilityai/stable-diffusion-3.5-large and -large-turbo.
// To make things worse, it currently only has partitioned model.fp16-00001-of-00002.safetensors and model.fp16-00002-of-00002.safetensors // under the text_encoder_3 directory, for the t5xxl_fp16.safetensors model. This means that we need to merge the two partitions // to get the monolithic text encoders. This is not a trivial task. // Since the situation can change, we do not want to spend efforts to handle the uniqueness of stabilityai/stable-diffusion-3.5-medium, // which involves different paths and merging the two partitions files for t5xxl_fp16.safetensors. // so for now, we'll use the text encoder models from the stabilityai/stable-diffusion-3.5-large repository. // TODO: Change to "stabilityai/stable-diffusion-3.5-medium" once the maintainers of the repository add back the monolithic text encoders. Which::V3_5Medium => "stabilityai/stable-diffusion-3.5-large", Which::V3Medium => unreachable!(), }; api.repo(hf_hub::Repo::model(name.to_string())) }; let sai_repo_for_mmdit = { let name = match which { Which::V3_5Large => "stabilityai/stable-diffusion-3.5-large", Which::V3_5LargeTurbo => "stabilityai/stable-diffusion-3.5-large-turbo", Which::V3_5Medium => "stabilityai/stable-diffusion-3.5-medium", Which::V3Medium => unreachable!(), }; api.repo(hf_hub::Repo::model(name.to_string())) }; let clip_g_file = sai_repo_for_text_encoders.get("text_encoders/clip_g.safetensors")?; let clip_l_file = sai_repo_for_text_encoders.get("text_encoders/clip_l.safetensors")?; let t5xxl_file = sai_repo_for_text_encoders.get("text_encoders/t5xxl_fp16.safetensors")?; let model_file = { let model_file = match which { Which::V3_5Large => "sd3.5_large.safetensors", Which::V3_5LargeTurbo => "sd3.5_large_turbo.safetensors", Which::V3_5Medium => "sd3.5_medium.safetensors", Which::V3Medium => unreachable!(), }; sai_repo_for_mmdit.get(model_file)? }; let triple = StableDiffusion3TripleClipWithTokenizer::new_split( &clip_g_file, &clip_l_file, &t5xxl_file, &device, )?; let vb = unsafe { candle_nn::VarBuilder::from_mmaped_safetensors(&[model_file], DType::F16, &device)? }; match which { Which::V3_5Large => (MMDiTConfig::sd3_5_large(), triple, vb), Which::V3_5LargeTurbo => (MMDiTConfig::sd3_5_large(), triple, vb), Which::V3_5Medium => (MMDiTConfig::sd3_5_medium(), triple, vb), Which::V3Medium => unreachable!(), } } else { let sai_repo = { let name = "stabilityai/stable-diffusion-3-medium"; api.repo(hf_hub::Repo::model(name.to_string())) }; let model_file = sai_repo.get("sd3_medium_incl_clips_t5xxlfp16.safetensors")?; let vb = unsafe { candle_nn::VarBuilder::from_mmaped_safetensors(&[&model_file], DType::F16, &device)? }; let triple = StableDiffusion3TripleClipWithTokenizer::new(vb.pp("text_encoders"))?; (MMDiTConfig::sd3_medium(), triple, vb) }; let (context, y) = triple.encode_text_to_embedding(prompt.as_str(), &device)?; let (context_uncond, y_uncond) = triple.encode_text_to_embedding(uncond_prompt.as_str(), &device)?; // Drop the text model early to avoid using too much memory. 
drop(triple); let context = Tensor::cat(&[context, context_uncond], 0)?; let y = Tensor::cat(&[y, y_uncond], 0)?; if let Some(seed) = seed { device.set_seed(seed)?; } let slg_config = if use_slg { match which { // https://github.com/Stability-AI/sd3.5/blob/4e484e05308d83fb77ae6f680028e6c313f9da54/sd3_infer.py#L388-L394 Which::V3_5Medium => Some(sampling::SkipLayerGuidanceConfig { scale: 2.5, start: 0.01, end: 0.2, layers: vec![7, 8, 9], }), _ => anyhow::bail!("--use-slg can only be used with 3.5-medium"), } } else { None }; let start_time = std::time::Instant::now(); let x = { let mmdit = MMDiT::new( &mmdit_config, use_flash_attn, vb.pp("model.diffusion_model"), )?; sampling::euler_sample( &mmdit, &y, &context, num_inference_steps, cfg_scale, time_shift, height, width, slg_config, )? }; let dt = start_time.elapsed().as_secs_f32(); println!( "Sampling done. {num_inference_steps} steps. {:.2}s. Average rate: {:.2} iter/s", dt, num_inference_steps as f32 / dt ); let img = { let vb_vae = vb.rename_f(sd3_vae_vb_rename).pp("first_stage_model"); let autoencoder = build_sd3_vae_autoencoder(vb_vae)?; // Apply TAESD3 scale factor. Seems to be significantly improving the quality of the image. // https://github.com/comfyanonymous/ComfyUI/blob/3c60ecd7a83da43d694e26a77ca6b93106891251/nodes.py#L721-L723 autoencoder.decode(&((x / 1.5305)? + 0.0609)?)? }; let img = ((img.clamp(-1f32, 1f32)? + 1.0)? * 127.5)?.to_dtype(candle::DType::U8)?; candle_examples::save_image(&img.i(0)?, "out.jpg")?; Ok(()) }
candle/candle-examples/examples/stable-diffusion-3/main.rs/0
{ "file_path": "candle/candle-examples/examples/stable-diffusion-3/main.rs", "repo_id": "candle", "token_count": 4715 }
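The stable-diffusion-3 example above passes cfg_scale together with the batched conditional/unconditional embeddings, so the guidance blend presumably happens inside sampling::euler_sample. As a standalone illustration of that classifier-free-guidance blend, here is a minimal Rust sketch on plain f32 slices; the cfg_combine name and the slice-based signature are assumptions made for this sketch, not part of the candle code, which works on batched tensors.

/// Classifier-free guidance blend: uncond + scale * (cond - uncond).
/// Standalone sketch on plain slices; only meant to show the arithmetic.
fn cfg_combine(cond: &[f32], uncond: &[f32], cfg_scale: f32) -> Vec<f32> {
    cond.iter()
        .zip(uncond.iter())
        .map(|(c, u)| u + cfg_scale * (c - u))
        .collect()
}

fn main() {
    let cond = [0.8f32, -0.2, 0.5];
    let uncond = [0.1f32, 0.0, 0.4];
    // With cfg_scale = 1.0 the result is just the conditional prediction.
    println!("{:?}", cfg_combine(&cond, &uncond, 4.0));
}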
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use anyhow::Error as E; use clap::{Parser, ValueEnum}; use candle::{DType, Tensor}; use candle_examples::token_output_stream::TokenOutputStream; use candle_nn::VarBuilder; use candle_transformers::models::{trocr, vit}; use tokenizers::Tokenizer; mod image_processor; #[derive(Clone, Debug, Copy, ValueEnum)] enum Which { #[value(name = "base")] BaseHandwritten, #[value(name = "large")] LargeHandwritten, BasePrinted, LargePrinted, } impl Which { fn repo_and_branch_name(&self) -> (&str, &str) { match self { Self::BaseHandwritten => ("microsoft/trocr-base-handwritten", "refs/pr/3"), Self::LargeHandwritten => ("microsoft/trocr-large-handwritten", "refs/pr/6"), Self::BasePrinted => ("microsoft/trocr-base-printed", "refs/pr/7"), Self::LargePrinted => ("microsoft/trocr-large-printed", "main"), } } } #[derive(Debug, Clone, serde::Deserialize)] struct Config { encoder: vit::Config, decoder: trocr::TrOCRConfig, } #[derive(Parser, Debug)] struct Args { #[arg(long)] model: Option<String>, /// Choose the variant of the model to run. #[arg(long, default_value = "base")] which: Which, /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, /// The image file to be processed. #[arg(long)] image: String, /// Tokenization config. #[arg(long)] tokenizer: Option<String>, } pub fn main() -> anyhow::Result<()> { let args = Args::parse(); let api = hf_hub::api::sync::Api::new()?; let mut tokenizer_dec = { let tokenizer_file = match args.tokenizer { None => api .model(String::from("ToluClassics/candle-trocr-tokenizer")) .get("tokenizer.json")?, Some(tokenizer) => std::path::PathBuf::from(tokenizer), }; let tokenizer = Tokenizer::from_file(&tokenizer_file).map_err(E::msg)?; TokenOutputStream::new(tokenizer) }; let device = candle_examples::device(args.cpu)?; let vb = { let model = match args.model { Some(model) => std::path::PathBuf::from(model), None => { let (repo, branch) = args.which.repo_and_branch_name(); api.repo(hf_hub::Repo::with_revision( repo.to_string(), hf_hub::RepoType::Model, branch.to_string(), )) .get("model.safetensors")? } }; println!("model: {:?}", model); unsafe { VarBuilder::from_mmaped_safetensors(&[model], DType::F32, &device)? 
} }; let (encoder_config, decoder_config) = { let (repo, branch) = args.which.repo_and_branch_name(); let config_filename = api .repo(hf_hub::Repo::with_revision( repo.to_string(), hf_hub::RepoType::Model, branch.to_string(), )) .get("config.json")?; let config: Config = serde_json::from_reader(std::fs::File::open(config_filename)?)?; (config.encoder, config.decoder) }; let mut model = trocr::TrOCRModel::new(&encoder_config, &decoder_config, vb)?; let processor_config = image_processor::ProcessorConfig::default(); let processor = image_processor::ViTImageProcessor::new(&processor_config); let image = vec![args.image.as_str()]; let image = processor.preprocess(image)?.to_device(&device)?; let encoder_xs = model.encoder().forward(&image)?; let mut logits_processor = candle_transformers::generation::LogitsProcessor::new(1337, None, None); let mut token_ids: Vec<u32> = vec![decoder_config.decoder_start_token_id]; for index in 0..1000 { let context_size = if index >= 1 { 1 } else { token_ids.len() }; let start_pos = token_ids.len().saturating_sub(context_size); let input_ids = Tensor::new(&token_ids[start_pos..], &device)?.unsqueeze(0)?; let logits = model.decode(&input_ids, &encoder_xs, start_pos)?; let logits = logits.squeeze(0)?; let logits = logits.get(logits.dim(0)? - 1)?; let token = logits_processor.sample(&logits)?; token_ids.push(token); if let Some(t) = tokenizer_dec.next_token(token)? { use std::io::Write; print!("{t}"); std::io::stdout().flush()?; } if token == decoder_config.eos_token_id { break; } } if let Some(rest) = tokenizer_dec.decode_rest().map_err(E::msg)? { print!("{rest}"); } println!(); Ok(()) }
candle/candle-examples/examples/trocr/main.rs/0
{ "file_path": "candle/candle-examples/examples/trocr/main.rs", "repo_id": "candle", "token_count": 2167 }
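The TrOCR example decodes greedily: after the first step only the newest token is fed back, the last-position logits are taken, and generation stops at eos_token_id (with no temperature configured, the LogitsProcessor should reduce to argmax sampling). Below is a toy Rust sketch of that loop shape with a closure standing in for the model; fake_decode, EOS and the 8-entry vocabulary are invented for illustration.

// Greedy decoding skeleton: start from a BOS-like token, repeatedly take the
// argmax of the last-step logits, stop on EOS or when the step budget runs out.
fn argmax(logits: &[f32]) -> u32 {
    let mut best = 0usize;
    for (i, v) in logits.iter().enumerate() {
        if *v > logits[best] {
            best = i;
        }
    }
    best as u32
}

fn main() {
    const EOS: u32 = 2;
    // Stand-in for model.decode(): emits (last token + 1) until EOS is reached.
    let fake_decode = |tokens: &[u32]| -> Vec<f32> {
        let mut logits = vec![0.0f32; 8];
        let next = (tokens[tokens.len() - 1] + 1).min(EOS);
        logits[next as usize] = 1.0;
        logits
    };
    let mut tokens: Vec<u32> = vec![0]; // stand-in for decoder_start_token_id
    for _ in 0..1000 {
        let logits = fake_decode(tokens.as_slice());
        let next = argmax(&logits);
        tokens.push(next);
        if next == EOS {
            break;
        }
    }
    println!("{tokens:?}"); // [0, 1, 2]
}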
#include "kernels.h" #include "kernel_helpers.h" #include "flash_fwd_launch_template.h" void run_mha_fwd(Flash_fwd_params &params, cudaStream_t stream) { FP16_SWITCH(!params.is_bf16, [&] { HEADDIM_SWITCH(params.d, [&] { BOOL_SWITCH(params.is_causal, Is_causal, [&] { run_mha_fwd_<elem_type, kHeadDim, Is_causal>(params, stream); }); }); }); } extern "C" void run_mha( void *q_ptr, void *k_ptr, void *v_ptr, void *o_ptr, void *softmax_lse_ptr, void *alibi_slopes_ptr, int32_t *cu_seqlens_q_ptr, int32_t *cu_seqlens_k_ptr, uint32_t q_batch_stride, uint32_t k_batch_stride, uint32_t v_batch_stride, uint32_t o_batch_stride, uint32_t alibi_slopes_batch_stride, uint32_t q_row_stride, uint32_t k_row_stride, uint32_t v_row_stride, uint32_t o_row_stride, uint32_t q_head_stride, uint32_t k_head_stride, uint32_t v_head_stride, uint32_t o_head_stride, uint32_t b, uint32_t h, uint32_t h_k, uint32_t d, uint32_t d_rounded, float softmax_scale, uint32_t seqlen_q, uint32_t seqlen_k, uint32_t seqlen_q_rounded, uint32_t seqlen_k_rounded, int is_bf16, int is_causal, int unpadded_lse, int window_size_left, int window_size_right, float softcap ) { Flash_fwd_params params; // Reset the parameters memset(&params, 0, sizeof(params)); // Set the pointers and strides. params.q_ptr = q_ptr; params.k_ptr = k_ptr; params.v_ptr = v_ptr; params.o_ptr = o_ptr; params.softmax_lse_ptr = softmax_lse_ptr; params.alibi_slopes_ptr = alibi_slopes_ptr; // All stride are in elements, not bytes. params.q_batch_stride = q_batch_stride; params.k_batch_stride = k_batch_stride; params.v_batch_stride = v_batch_stride; params.o_batch_stride = o_batch_stride; params.alibi_slopes_batch_stride = alibi_slopes_batch_stride; params.q_row_stride = q_row_stride; params.k_row_stride = k_row_stride; params.v_row_stride = v_row_stride; params.o_row_stride = o_row_stride; params.q_head_stride = q_head_stride; params.k_head_stride = k_head_stride; params.v_head_stride = v_head_stride; params.o_head_stride = o_head_stride; // Set the dimensions. params.b = b; params.h = h; params.h_k = h_k; params.h_h_k_ratio = h / h_k; params.seqlen_q = seqlen_q; params.seqlen_k = seqlen_k; params.seqlen_q_rounded = seqlen_q_rounded; params.seqlen_k_rounded = seqlen_k_rounded; params.d = d; params.d_rounded = d_rounded; // Set the different scale values. if (softcap > 0.0) { params.softcap = softmax_scale / softcap; params.scale_softmax = softcap; params.scale_softmax_log2 = softcap * M_LOG2E; } else{ // Remove potential NaN params.softcap = 0.0; params.scale_softmax = softmax_scale; params.scale_softmax_log2 = softmax_scale * M_LOG2E; } params.p_dropout = 1.; // probability to keep params.p_dropout_in_uint8_t = uint8_t(std::floor(params.p_dropout * 255.0)); params.rp_dropout = 1.f / params.p_dropout; params.scale_softmax_rp_dropout = params.rp_dropout * params.scale_softmax; params.is_bf16 = is_bf16; params.cu_seqlens_q = cu_seqlens_q_ptr; params.cu_seqlens_k = cu_seqlens_k_ptr; params.p_ptr = nullptr; // used for `return_softmax`. params.seqused_k = nullptr; params.is_causal = is_causal; params.window_size_left = window_size_left; params.window_size_right = window_size_right; params.is_seqlens_k_cumulative = true; params.num_splits = 1; params.unpadded_lse = unpadded_lse; cudaStream_t stream = 0; // Use the default stream. run_mha_fwd(params, stream); }
candle/candle-flash-attn/kernels/flash_api.cu/0
{ "file_path": "candle/candle-flash-attn/kernels/flash_api.cu", "repo_id": "candle", "token_count": 1818 }
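One detail of run_mha above worth spelling out: when softcap is positive, the softmax scale is folded into the capping, so the kernel effectively computes softcap * tanh(softmax_scale * s / softcap) on the raw scores, and the log2-domain scale is precomputed with M_LOG2E. A hedged host-side sketch of that parameter remapping in Rust; the SoftmaxParams struct and function name are illustrative, not part of the actual bindings.

// Remaps (softmax_scale, softcap) into the three fields the forward kernel
// consumes, mirroring the branch in run_mha above.
const M_LOG2E: f32 = std::f32::consts::LOG2_E;

#[derive(Debug)]
struct SoftmaxParams {
    softcap: f32,            // pre-tanh factor (0.0 disables capping)
    scale_softmax: f32,      // post-tanh (or plain) softmax scale
    scale_softmax_log2: f32, // the same scale expressed in log2 units
}

fn softmax_params(softmax_scale: f32, softcap: f32) -> SoftmaxParams {
    if softcap > 0.0 {
        SoftmaxParams {
            softcap: softmax_scale / softcap,
            scale_softmax: softcap,
            scale_softmax_log2: softcap * M_LOG2E,
        }
    } else {
        SoftmaxParams {
            softcap: 0.0,
            scale_softmax: softmax_scale,
            scale_softmax_log2: softmax_scale * M_LOG2E,
        }
    }
}

fn main() {
    println!("{:?}", softmax_params(0.125, 30.0));
    println!("{:?}", softmax_params(0.125, 0.0));
}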
use anyhow::Result; use candle::{DType, Device, IndexOp, Tensor, D}; fn to_vec3_round(t: Tensor, digits: i32) -> Result<Vec<Vec<Vec<f32>>>> { let b = 10f32.powi(digits); let t = t.to_vec3::<f32>()?; let t = t .iter() .map(|t| { t.iter() .map(|t| t.iter().map(|t| f32::round(t * b) / b).collect()) .collect() }) .collect(); Ok(t) } fn fa_acausal(q: &Tensor, k: &Tensor, v: &Tensor, softmax_scale: f32) -> Result<Tensor> { let in_dtype = q.dtype(); let q = q.to_dtype(DType::F32)?; let k = k.to_dtype(DType::F32)?; let v = v.to_dtype(DType::F32)?; let att = (q.matmul(&k.t()?)? * softmax_scale as f64)?; let att = candle_nn::ops::softmax(&att, D::Minus1)?; // Convert to contiguous as matmul doesn't support strided vs for now. let output = att.matmul(&v.contiguous()?)?.to_dtype(in_dtype)?; Ok(output) } fn fa_acausal_softcap(q: &Tensor, k: &Tensor, v: &Tensor, softcap: f32) -> Result<Tensor> { let in_dtype = q.dtype(); let q = q.to_dtype(DType::F32)?; let k = k.to_dtype(DType::F32)?; let v = v.to_dtype(DType::F32)?; // let att = (q.matmul(&k.t()?)? * softmax_scale as f64)?; let att = q.matmul(&k.t()?)?; let att = (softcap as f64 * ((att / softcap as f64)?.tanh())?)?; let att = candle_nn::ops::softmax(&att, D::Minus1)?; // Convert to contiguous as matmul doesn't support strided vs for now. let output = att.matmul(&v.contiguous()?)?.to_dtype(in_dtype)?; Ok(output) } #[test] fn flash_attn_acausal() -> Result<()> { let device = Device::new_cuda(0)?; let q = Tensor::arange(0u32, 48, &device)? .to_dtype(DType::F16)? .reshape((1, 3, 2, 8))?; let k = (&q / 40.)?; let v = (&q / 50.)?; let q = (&q / 30.)?; let ys1 = fa_acausal(&q, &k, &v, 0.5)?; let ys1 = ys1.i(0)?.to_dtype(DType::F32)?; let ys2 = { let q = q.transpose(1, 2)?; let k = k.transpose(1, 2)?; let v = v.transpose(1, 2)?; candle_flash_attn::flash_attn(&q, &k, &v, 0.5, false)?.transpose(1, 2)? }; let ys2 = ys2.i(0)?.to_dtype(DType::F32)?; let diff = ys1.sub(&ys2)?.abs()?.flatten_all()?.max(0)?; assert_eq!(ys1.dims(), &[3, 2, 8]); assert_eq!( to_vec3_round(ys1, 4)?, &[ [ [0.0837, 0.1038, 0.1238, 0.1438, 0.1637, 0.1837, 0.2037, 0.2238], [0.0922, 0.1122, 0.1322, 0.1522, 0.1721, 0.1921, 0.2122, 0.2322] ], [ [0.4204, 0.4404, 0.4604, 0.4805, 0.5005, 0.5205, 0.5405, 0.5605], [0.428, 0.448, 0.468, 0.488, 0.5083, 0.5283, 0.5483, 0.5684] ], [ [0.7554, 0.7754, 0.7954, 0.8154, 0.8354, 0.8555, 0.8755, 0.8955], [0.7622, 0.7822, 0.8022, 0.8223, 0.8423, 0.8623, 0.8823, 0.9023] ] ] ); assert_eq!(ys2.dims(), &[3, 2, 8]); assert_eq!( to_vec3_round(ys2, 4)?, &[ [ [0.0837, 0.1038, 0.1238, 0.1438, 0.1637, 0.1837, 0.2037, 0.2238], [0.0922, 0.1122, 0.1322, 0.1522, 0.1721, 0.1921, 0.2122, 0.2322] ], [ [0.4204, 0.4404, 0.4604, 0.4805, 0.5005, 0.5205, 0.5405, 0.5605], [0.428, 0.448, 0.468, 0.488, 0.5083, 0.5283, 0.5483, 0.5684] ], [ [0.7554, 0.7754, 0.7954, 0.8154, 0.8354, 0.8555, 0.8755, 0.8955], [0.7622, 0.7822, 0.8022, 0.8223, 0.8423, 0.8623, 0.8823, 0.9023] ] ] ); assert!(diff.to_vec0::<f32>()?.abs() < 1e-5); Ok(()) } #[test] fn flash_attn_acausal_softcap() -> Result<()> { let device = Device::new_cuda(0)?; let q = Tensor::arange(0u32, 3 * 5 * 8, &device)? .to_dtype(DType::F16)? 
.reshape((1, 3, 5, 8))?; let k = (&q / 40.)?; let v = (&q / 50.)?; let q = (&q / 30.)?; let softcap = 5.0f32; let ys1 = fa_acausal_softcap(&q, &k, &v, softcap.clone())?; let ys1 = ys1.i(0)?.to_dtype(DType::F32)?; let ys2 = { let q = q.transpose(1, 2)?; let k = k.transpose(1, 2)?; let v = v.transpose(1, 2)?; candle_flash_attn::flash_attn_alibi_windowed_softcap( &q, &k, &v, None, // alibi_slopes // 1.0, // softmax // None, // window_size_left // None, // window_size_right // softcap.clone(), // softcap // )? .transpose(1, 2)? }; let ys2 = ys2.i(0)?.to_dtype(DType::F32)?; let diff = ys1.sub(&ys2)?.abs()?.flatten_all()?.max(0)?; assert_eq!(ys1.dims(), &[3, 5, 8]); assert_eq!(ys2.dims(), &[3, 5, 8]); assert!(diff.to_vec0::<f32>()?.abs() < 1e-3); Ok(()) } #[test] fn flash_attn_varlen() -> Result<()> { let device = Device::new_cuda(0)?; let q = Tensor::arange(0u32, 48, &device)? .to_dtype(DType::F16)? .reshape((3, 2, 8))?; let k = (&q / 40.)?; let v = (&q / 50.)?; let q = (&q / 30.)?; let seqlens_q = Tensor::new(&[0u32, 2u32], &device)?; let seqlens_k = Tensor::new(&[0u32, 2u32], &device)?; let ys = { let q = q.transpose(0, 1)?; let k = k.transpose(0, 1)?; let v = v.transpose(0, 1)?; candle_flash_attn::flash_attn_varlen( &q, &k, &v, &seqlens_q, &seqlens_k, 32, 32, 0.5, false, )? .transpose(0, 1)? }; let ys = ys.to_dtype(DType::F32)?; assert_eq!(ys.dims(), &[3, 2, 8]); assert_eq!( to_vec3_round(ys, 4)?, &[ [ [0.0837, 0.1038, 0.1238, 0.1438, 0.1637, 0.1837, 0.2037, 0.2238], [0.0922, 0.1122, 0.1322, 0.1522, 0.1721, 0.1921, 0.2122, 0.2322] ], [ [0.4204, 0.4404, 0.4604, 0.4805, 0.5005, 0.5205, 0.5405, 0.5605], [0.428, 0.448, 0.468, 0.488, 0.5083, 0.5283, 0.5483, 0.5684] ], [ [0.7554, 0.7754, 0.7954, 0.8154, 0.8354, 0.8555, 0.8755, 0.8955], [0.7622, 0.7822, 0.8022, 0.8223, 0.8423, 0.8623, 0.8823, 0.9023] ] ] ); Ok(()) }
candle/candle-flash-attn/tests/flash_attn_tests.rs/0
{ "file_path": "candle/candle-flash-attn/tests/flash_attn_tests.rs", "repo_id": "candle", "token_count": 3779 }
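The tests above check the fused kernels against straightforward reference implementations (fa_acausal and fa_acausal_softcap). For readers who want the math without the tensor plumbing, here is a sketch of the same scaled-dot-product attention for a single query row on plain slices; attend_row and the toy inputs are illustrative only.

// Single-row reference attention: softmax(q . k_i * scale) weighted sum of v_i.
// No batching, no masking, no softcap; just the core math the tests rely on.
fn attend_row(q: &[f32], keys: &[Vec<f32>], values: &[Vec<f32>], scale: f32) -> Vec<f32> {
    let scores: Vec<f32> = keys
        .iter()
        .map(|k| q.iter().zip(k).map(|(a, b)| a * b).sum::<f32>() * scale)
        .collect();
    // Numerically stable softmax over the scores.
    let max = scores.iter().cloned().fold(f32::NEG_INFINITY, f32::max);
    let exps: Vec<f32> = scores.iter().map(|s| (s - max).exp()).collect();
    let denom: f32 = exps.iter().sum();
    let mut out = vec![0.0f32; values[0].len()];
    for (w, v) in exps.iter().zip(values) {
        for (o, x) in out.iter_mut().zip(v) {
            *o += (w / denom) * x;
        }
    }
    out
}

fn main() {
    let q = vec![1.0f32, 0.0];
    let keys = vec![vec![1.0f32, 0.0], vec![0.0, 1.0]];
    let values = vec![vec![1.0f32, 2.0], vec![3.0, 4.0]];
    println!("{:?}", attend_row(&q, &keys, &values, 0.5));
}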
// Adapted from https://github.com/ggerganov/llama.cpp/blob/master/ggml-cuda/argsort.cu #define SORT_ORDER_ASC 1 #define SORT_ORDER_DESC 0 #include "cuda_utils.cuh" #include<stdint.h> template<typename T> static inline __device__ void ggml_cuda_swap(T & a, T & b) { T tmp = a; a = b; b = tmp; } template<int order, typename T> static __device__ void k_argsort(const T * x, uint32_t * dst, const int ncols, int ncols_pad) { // bitonic sort int col = threadIdx.x; int row = blockIdx.y; if (col >= ncols_pad) { return; } const T * x_row = x + row * ncols; extern __shared__ int dst_row[]; // initialize indices dst_row[col] = col; __syncthreads(); for (int k = 2; k <= ncols_pad; k *= 2) { for (int j = k / 2; j > 0; j /= 2) { int ixj = col ^ j; if (ixj > col) { if ((col & k) == 0) { if (dst_row[col] >= ncols || (dst_row[ixj] < ncols && (order == SORT_ORDER_ASC ? x_row[dst_row[col]] > x_row[dst_row[ixj]] : x_row[dst_row[col]] < x_row[dst_row[ixj]])) ) { ggml_cuda_swap(dst_row[col], dst_row[ixj]); } } else { if (dst_row[ixj] >= ncols || (dst_row[col] < ncols && (order == SORT_ORDER_ASC ? x_row[dst_row[col]] < x_row[dst_row[ixj]] : x_row[dst_row[col]] > x_row[dst_row[ixj]])) ) { ggml_cuda_swap(dst_row[col], dst_row[ixj]); } } } __syncthreads(); } } // copy the result to dst without the padding if (col < ncols) { dst[row * ncols + col] = dst_row[col]; } } #define ASORT_OP(TYPENAME, RUST_NAME) \ extern "C" __global__ void asort_asc_##RUST_NAME( \ const TYPENAME * x, uint32_t * dst, const int ncols, int ncols_pad \ ) { \ k_argsort<SORT_ORDER_ASC>(x, dst, ncols, ncols_pad); \ } \ extern "C" __global__ void asort_desc_##RUST_NAME( \ const TYPENAME * x, uint32_t * dst, const int ncols, int ncols_pad \ ) { \ k_argsort<SORT_ORDER_DESC>(x, dst, ncols, ncols_pad); \ } \ #if __CUDA_ARCH__ >= 800 ASORT_OP(__nv_bfloat16, bf16) #endif #if __CUDA_ARCH__ >= 530 ASORT_OP(__half, f16) #endif ASORT_OP(float, f32) ASORT_OP(double, f64) ASORT_OP(uint8_t, u8) ASORT_OP(uint32_t, u32) ASORT_OP(int64_t, i64)
candle/candle-kernels/src/sort.cu/0
{ "file_path": "candle/candle-kernels/src/sort.cu", "repo_id": "candle", "token_count": 1469 }
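The bitonic argsort above expects the caller to pass ncols_pad as the next power of two at or above ncols, and indices in the padded tail are effectively treated as larger than every real value so they never land in the copied range. A hedged host-side sketch of that padding computation together with a CPU reference argsort one could compare the asort_asc_* kernels against; the function names are illustrative.

// ncols_pad: next power of two at or above ncols, as the kernel launch expects.
fn ncols_pad(ncols: usize) -> usize {
    let mut p = 1;
    while p < ncols {
        p *= 2;
    }
    p
}

// CPU reference: indices that sort `row` ascending, comparable to asort_asc_*.
fn argsort_asc(row: &[f32]) -> Vec<u32> {
    let mut idx: Vec<u32> = (0..row.len() as u32).collect();
    idx.sort_by(|&a, &b| row[a as usize].total_cmp(&row[b as usize]));
    idx
}

fn main() {
    let row = [3.0f32, 1.0, 2.5, 0.5, 4.0];
    assert_eq!(ncols_pad(row.len()), 8);
    println!("{:?}", argsort_asc(&row)); // [3, 1, 2, 0, 4]
}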
// Imported from https://github.com/ggerganov/llama.cpp/blob/master/ggml-metal.metal #include <metal_stdlib> using namespace metal; #define MAX(x, y) ((x) > (y) ? (x) : (y)) #define MIN(x, y) ((x) < (y) ? (x) : (y)) #define SWAP(x, y) { auto tmp = (x); (x) = (y); (y) = tmp; } #define QK4_0 32 #define QR4_0 2 typedef struct { half d; // delta uint8_t qs[QK4_0 / 2]; // nibbles / quants } block_q4_0; #define QK4_1 32 typedef struct { half d; // delta half m; // min uint8_t qs[QK4_1 / 2]; // nibbles / quants } block_q4_1; #define QK5_0 32 typedef struct { half d; // delta uint8_t qh[4]; // 5-th bit of quants uint8_t qs[QK5_0 / 2]; // nibbles / quants } block_q5_0; #define QK5_1 32 typedef struct { half d; // delta half m; // min uint8_t qh[4]; // 5-th bit of quants uint8_t qs[QK5_1 / 2]; // nibbles / quants } block_q5_1; #define QK8_0 32 typedef struct { half d; // delta int8_t qs[QK8_0]; // quants } block_q8_0; #define N_SIMDWIDTH 32 // assuming SIMD group size is 32 enum ggml_sort_order { GGML_SORT_ASC, GGML_SORT_DESC, }; // general-purpose kernel for addition, multiplication and division of two tensors // pros: works for non-contiguous tensors, supports broadcast across all dims // cons: not very efficient kernel void kernel_add( device const char * src0, device const char * src1, device char * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant int64_t & ne03, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant uint64_t & nb03, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant int64_t & ne13, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant uint64_t & nb13, constant int64_t & ne0, constant int64_t & ne1, constant int64_t & ne2, constant int64_t & ne3, constant uint64_t & nb0, constant uint64_t & nb1, constant uint64_t & nb2, constant uint64_t & nb3, constant int64_t & offs, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tpitg[[thread_position_in_threadgroup]], uint3 ntg[[threads_per_threadgroup]]) { const int64_t i03 = tgpig.z; const int64_t i02 = tgpig.y; const int64_t i01 = tgpig.x; const int64_t i13 = i03 % ne13; const int64_t i12 = i02 % ne12; const int64_t i11 = i01 % ne11; device const char * src0_ptr = src0 + i03*nb03 + i02*nb02 + i01*nb01 + offs; device const char * src1_ptr = src1 + i13*nb13 + i12*nb12 + i11*nb11; device char * dst_ptr = dst + i03*nb3 + i02*nb2 + i01*nb1 + offs; for (int i0 = tpitg.x; i0 < ne0; i0 += ntg.x) { const int i10 = i0 % ne10; *((device float *)(dst_ptr + i0*nb0)) = *((device float *)(src0_ptr + i0*nb00)) + *((device float *)(src1_ptr + i10*nb10)); } } kernel void kernel_mul( device const char * src0, device const char * src1, device char * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant int64_t & ne03, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant uint64_t & nb03, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant int64_t & ne13, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant uint64_t & nb13, constant int64_t & ne0, constant int64_t & ne1, constant int64_t & ne2, constant int64_t & ne3, constant uint64_t & nb0, constant uint64_t & nb1, constant uint64_t & nb2, constant uint64_t & nb3, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tpitg[[thread_position_in_threadgroup]], uint3 ntg[[threads_per_threadgroup]]) { const int64_t i03 = tgpig.z; const int64_t 
i02 = tgpig.y; const int64_t i01 = tgpig.x; const int64_t i13 = i03 % ne13; const int64_t i12 = i02 % ne12; const int64_t i11 = i01 % ne11; device const char * src0_ptr = src0 + i03*nb03 + i02*nb02 + i01*nb01; device const char * src1_ptr = src1 + i13*nb13 + i12*nb12 + i11*nb11; device char * dst_ptr = dst + i03*nb3 + i02*nb2 + i01*nb1; for (int i0 = tpitg.x; i0 < ne0; i0 += ntg.x) { const int i10 = i0 % ne10; *((device float *)(dst_ptr + i0*nb0)) = *((device float *)(src0_ptr + i0*nb00)) * *((device float *)(src1_ptr + i10*nb10)); } } kernel void kernel_div( device const char * src0, device const char * src1, device char * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant int64_t & ne03, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant uint64_t & nb03, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant int64_t & ne13, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant uint64_t & nb13, constant int64_t & ne0, constant int64_t & ne1, constant int64_t & ne2, constant int64_t & ne3, constant uint64_t & nb0, constant uint64_t & nb1, constant uint64_t & nb2, constant uint64_t & nb3, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tpitg[[thread_position_in_threadgroup]], uint3 ntg[[threads_per_threadgroup]]) { const int64_t i03 = tgpig.z; const int64_t i02 = tgpig.y; const int64_t i01 = tgpig.x; const int64_t i13 = i03 % ne13; const int64_t i12 = i02 % ne12; const int64_t i11 = i01 % ne11; device const char * src0_ptr = src0 + i03*nb03 + i02*nb02 + i01*nb01; device const char * src1_ptr = src1 + i13*nb13 + i12*nb12 + i11*nb11; device char * dst_ptr = dst + i03*nb3 + i02*nb2 + i01*nb1; for (int i0 = tpitg.x; i0 < ne0; i0 += ntg.x) { const int i10 = i0 % ne10; *((device float *)(dst_ptr + i0*nb0)) = *((device float *)(src0_ptr + i0*nb00)) / *((device float *)(src1_ptr + i10*nb10)); } } // assumption: src1 is a row // broadcast src1 into src0 kernel void kernel_add_row( device const float4 * src0, device const float4 * src1, device float4 * dst, constant uint64_t & nb [[buffer(28)]], uint tpig[[thread_position_in_grid]]) { dst[tpig] = src0[tpig] + src1[tpig % nb]; } kernel void kernel_mul_row( device const float4 * src0, device const float4 * src1, device float4 * dst, constant uint64_t & nb [[buffer(28)]], uint tpig[[thread_position_in_grid]]) { dst[tpig] = src0[tpig] * src1[tpig % nb]; } kernel void kernel_div_row( device const float4 * src0, device const float4 * src1, device float4 * dst, constant uint64_t & nb [[buffer(28)]], uint tpig[[thread_position_in_grid]]) { dst[tpig] = src0[tpig] / src1[tpig % nb]; } kernel void kernel_scale( device const float * src0, device float * dst, constant float & scale, uint tpig[[thread_position_in_grid]]) { dst[tpig] = src0[tpig] * scale; } kernel void kernel_scale_4( device const float4 * src0, device float4 * dst, constant float & scale, uint tpig[[thread_position_in_grid]]) { dst[tpig] = src0[tpig] * scale; } kernel void kernel_relu( device const float * src0, device float * dst, uint tpig[[thread_position_in_grid]]) { dst[tpig] = max(0.0f, src0[tpig]); } kernel void kernel_tanh( device const float * src0, device float * dst, uint tpig[[thread_position_in_grid]]) { device const float & x = src0[tpig]; dst[tpig] = precise::tanh(x); } constant float GELU_COEF_A = 0.044715f; constant float GELU_QUICK_COEF = -1.702f; constant float SQRT_2_OVER_PI = 0.79788456080286535587989211986876f; kernel void kernel_gelu( 
device const float4 * src0, device float4 * dst, uint tpig[[thread_position_in_grid]]) { device const float4 & x = src0[tpig]; // BEWARE !!! // Simply using "tanh" instead of "precise::tanh" will sometimes results in NaNs! // This was observed with Falcon 7B and 40B models // dst[tpig] = 0.5f*x*(1.0f + precise::tanh(SQRT_2_OVER_PI*x*(1.0f + GELU_COEF_A*x*x))); } kernel void kernel_gelu_quick( device const float4 * src0, device float4 * dst, uint tpig[[thread_position_in_grid]]) { device const float4 & x = src0[tpig]; dst[tpig] = x*(1.0f/(1.0f+exp(GELU_QUICK_COEF*x))); } kernel void kernel_silu( device const float4 * src0, device float4 * dst, uint tpig[[thread_position_in_grid]]) { device const float4 & x = src0[tpig]; dst[tpig] = x / (1.0f + exp(-x)); } kernel void kernel_sqr( device const float * src0, device float * dst, uint tpig[[thread_position_in_grid]]) { dst[tpig] = src0[tpig] * src0[tpig]; } kernel void kernel_sum_rows( device const float * src0, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant int64_t & ne03, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant uint64_t & nb03, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant int64_t & ne13, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant uint64_t & nb13, constant int64_t & ne0, constant int64_t & ne1, constant int64_t & ne2, constant int64_t & ne3, constant uint64_t & nb0, constant uint64_t & nb1, constant uint64_t & nb2, constant uint64_t & nb3, uint3 tpig[[thread_position_in_grid]]) { int64_t i3 = tpig.z; int64_t i2 = tpig.y; int64_t i1 = tpig.x; if (i3 >= ne03 || i2 >= ne02 || i1 >= ne01) { return; } device const float * src_row = (device const float *) ((device const char *) src0 + i1*nb01 + i2*nb02 + i3*nb03); device float * dst_row = (device float *) ((device char *) dst + i1*nb1 + i2*nb2 + i3*nb3); float row_sum = 0; for (int64_t i0 = 0; i0 < ne00; i0++) { row_sum += src_row[i0]; } dst_row[0] = row_sum; } kernel void kernel_soft_max( device const float * src0, device const float * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant float & scale, threadgroup float * buf [[threadgroup(0)]], uint tgpig[[threadgroup_position_in_grid]], uint tpitg[[thread_position_in_threadgroup]], uint sgitg[[simdgroup_index_in_threadgroup]], uint tiisg[[thread_index_in_simdgroup]], uint ntg[[threads_per_threadgroup]]) { const int64_t i03 = (tgpig) / (ne02*ne01); const int64_t i02 = (tgpig - i03*ne02*ne01) / ne01; const int64_t i01 = (tgpig - i03*ne02*ne01 - i02*ne01); device const float * psrc0 = src0 + i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00; device const float * pmask = src1 != src0 ? src1 + i01*ne00 : nullptr; device float * pdst = dst + i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00; // parallel max float lmax = -INFINITY; for (int i00 = tpitg; i00 < ne00; i00 += ntg) { lmax = MAX(lmax, psrc0[i00]*scale + (pmask ? pmask[i00] : 0.0f)); } // find the max value in the block float max_val = simd_max(lmax); if (ntg > N_SIMDWIDTH) { if (sgitg == 0) { buf[tiisg] = -INFINITY; } threadgroup_barrier(mem_flags::mem_threadgroup); if (tiisg == 0) { buf[sgitg] = max_val; } threadgroup_barrier(mem_flags::mem_threadgroup); max_val = buf[tiisg]; max_val = simd_max(max_val); } // parallel sum float lsum = 0.0f; for (int i00 = tpitg; i00 < ne00; i00 += ntg) { const float exp_psrc0 = exp((psrc0[i00]*scale + (pmask ? 
pmask[i00] : 0.0f)) - max_val); lsum += exp_psrc0; pdst[i00] = exp_psrc0; } // This barrier fixes a failing test // ref: https://github.com/ggerganov/ggml/pull/621#discussion_r1425156335 threadgroup_barrier(mem_flags::mem_none); float sum = simd_sum(lsum); if (ntg > N_SIMDWIDTH) { if (sgitg == 0) { buf[tiisg] = 0.0f; } threadgroup_barrier(mem_flags::mem_threadgroup); if (tiisg == 0) { buf[sgitg] = sum; } threadgroup_barrier(mem_flags::mem_threadgroup); sum = buf[tiisg]; sum = simd_sum(sum); } const float inv_sum = 1.0f/sum; for (int i00 = tpitg; i00 < ne00; i00 += ntg) { pdst[i00] *= inv_sum; } } kernel void kernel_soft_max_4( device const float * src0, device const float * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant float & scale, threadgroup float * buf [[threadgroup(0)]], uint tgpig[[threadgroup_position_in_grid]], uint tpitg[[thread_position_in_threadgroup]], uint sgitg[[simdgroup_index_in_threadgroup]], uint tiisg[[thread_index_in_simdgroup]], uint ntg[[threads_per_threadgroup]]) { const int64_t i03 = (tgpig) / (ne02*ne01); const int64_t i02 = (tgpig - i03*ne02*ne01) / ne01; const int64_t i01 = (tgpig - i03*ne02*ne01 - i02*ne01); device const float4 * psrc4 = (device const float4 *)(src0 + i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00); device const float4 * pmask = src1 != src0 ? (device const float4 *)(src1 + i01*ne00) : nullptr; device float4 * pdst4 = (device float4 *)(dst + i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00); // parallel max float4 lmax4 = -INFINITY; for (int i00 = tpitg; i00 < ne00/4; i00 += ntg) { lmax4 = fmax(lmax4, psrc4[i00]*scale + (pmask ? pmask[i00] : 0.0f)); } const float lmax = MAX(MAX(lmax4[0], lmax4[1]), MAX(lmax4[2], lmax4[3])); float max_val = simd_max(lmax); if (ntg > N_SIMDWIDTH) { if (sgitg == 0) { buf[tiisg] = -INFINITY; } threadgroup_barrier(mem_flags::mem_threadgroup); if (tiisg == 0) { buf[sgitg] = max_val; } threadgroup_barrier(mem_flags::mem_threadgroup); max_val = buf[tiisg]; max_val = simd_max(max_val); } // parallel sum float4 lsum4 = 0.0f; for (int i00 = tpitg; i00 < ne00/4; i00 += ntg) { const float4 exp_psrc4 = exp((psrc4[i00]*scale + (pmask ? 
pmask[i00] : 0.0f)) - max_val); lsum4 += exp_psrc4; pdst4[i00] = exp_psrc4; } const float lsum = lsum4[0] + lsum4[1] + lsum4[2] + lsum4[3]; // This barrier fixes a failing test // ref: https://github.com/ggerganov/ggml/pull/621#discussion_r1425156335 threadgroup_barrier(mem_flags::mem_none); float sum = simd_sum(lsum); if (ntg > N_SIMDWIDTH) { if (sgitg == 0) { buf[tiisg] = 0.0f; } threadgroup_barrier(mem_flags::mem_threadgroup); if (tiisg == 0) { buf[sgitg] = sum; } threadgroup_barrier(mem_flags::mem_threadgroup); sum = buf[tiisg]; sum = simd_sum(sum); } const float inv_sum = 1.0f/sum; for (int i00 = tpitg; i00 < ne00/4; i00 += ntg) { pdst4[i00] *= inv_sum; } } kernel void kernel_diag_mask_inf( device const float * src0, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int & n_past, uint3 tpig[[thread_position_in_grid]]) { const int64_t i02 = tpig[2]; const int64_t i01 = tpig[1]; const int64_t i00 = tpig[0]; if (i00 > n_past + i01) { dst[i02*ne01*ne00 + i01*ne00 + i00] = -INFINITY; } else { dst[i02*ne01*ne00 + i01*ne00 + i00] = src0[i02*ne01*ne00 + i01*ne00 + i00]; } } kernel void kernel_diag_mask_inf_8( device const float4 * src0, device float4 * dst, constant int64_t & ne00, constant int64_t & ne01, constant int & n_past, uint3 tpig[[thread_position_in_grid]]) { const int64_t i = 2*tpig[0]; dst[i+0] = src0[i+0]; dst[i+1] = src0[i+1]; int64_t i4 = 4*i; const int64_t i02 = i4/(ne00*ne01); i4 -= i02*ne00*ne01; const int64_t i01 = i4/(ne00); i4 -= i01*ne00; const int64_t i00 = i4; for (int k = 3; k >= 0; --k) { if (i00 + 4 + k <= n_past + i01) { break; } dst[i+1][k] = -INFINITY; if (i00 + k > n_past + i01) { dst[i][k] = -INFINITY; } } } kernel void kernel_norm( device const void * src0, device float * dst, constant int64_t & ne00, constant uint64_t & nb01, constant float & eps, threadgroup float * sum [[threadgroup(0)]], uint tgpig[[threadgroup_position_in_grid]], uint tpitg[[thread_position_in_threadgroup]], uint ntg[[threads_per_threadgroup]]) { device const float * x = (device const float *) ((device const char *) src0 + tgpig*nb01); // MEAN // parallel sum sum[tpitg] = 0.0f; for (int i00 = tpitg; i00 < ne00; i00 += ntg) { sum[tpitg] += x[i00]; } // reduce threadgroup_barrier(mem_flags::mem_threadgroup); for (uint i = ntg/2; i > 0; i /= 2) { if (tpitg < i) { sum[tpitg] += sum[tpitg + i]; } threadgroup_barrier(mem_flags::mem_threadgroup); } const float mean = sum[0] / ne00; // recenter and VARIANCE threadgroup_barrier(mem_flags::mem_threadgroup); device float * y = dst + tgpig*ne00; sum[tpitg] = 0.0f; for (int i00 = tpitg; i00 < ne00; i00 += ntg) { y[i00] = x[i00] - mean; sum[tpitg] += y[i00] * y[i00]; } // reduce threadgroup_barrier(mem_flags::mem_threadgroup); for (uint i = ntg/2; i > 0; i /= 2) { if (tpitg < i) { sum[tpitg] += sum[tpitg + i]; } threadgroup_barrier(mem_flags::mem_threadgroup); } const float variance = sum[0] / ne00; const float scale = 1.0f/sqrt(variance + eps); for (int i00 = tpitg; i00 < ne00; i00 += ntg) { y[i00] = y[i00] * scale; } } kernel void kernel_rms_norm( device const void * src0, device float * dst, constant int64_t & ne00, constant uint64_t & nb01, constant float & eps, threadgroup float * buf [[threadgroup(0)]], uint tgpig[[threadgroup_position_in_grid]], uint tpitg[[thread_position_in_threadgroup]], uint sgitg[[simdgroup_index_in_threadgroup]], uint tiisg[[thread_index_in_simdgroup]], uint ntg[[threads_per_threadgroup]]) { device const float4 * x = (device const float4 *) ((device const char *) src0 + tgpig*nb01); float4 sumf 
= 0; float all_sum = 0; // parallel sum for (int i00 = tpitg; i00 < ne00/4; i00 += ntg) { sumf += x[i00] * x[i00]; } all_sum = sumf[0] + sumf[1] + sumf[2] + sumf[3]; all_sum = simd_sum(all_sum); if (ntg > N_SIMDWIDTH) { if (sgitg == 0) { buf[tiisg] = 0.0f; } threadgroup_barrier(mem_flags::mem_threadgroup); if (tiisg == 0) { buf[sgitg] = all_sum; } threadgroup_barrier(mem_flags::mem_threadgroup); all_sum = buf[tiisg]; all_sum = simd_sum(all_sum); } const float mean = all_sum/ne00; const float scale = 1.0f/sqrt(mean + eps); device float4 * y = (device float4 *) (dst + tgpig*ne00); for (int i00 = tpitg; i00 < ne00/4; i00 += ntg) { y[i00] = x[i00] * scale; } } kernel void kernel_group_norm( device const float * src0, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant int32_t & n_groups, constant float & eps, threadgroup float * buf [[threadgroup(0)]], uint tgpig[[threadgroup_position_in_grid]], uint tpitg[[thread_position_in_threadgroup]], uint sgitg[[simdgroup_index_in_threadgroup]], uint tiisg[[thread_index_in_simdgroup]], uint ntg[[threads_per_threadgroup]]) { const int64_t ne = ne00*ne01*ne02; const int64_t gs = ne00*ne01*((ne02 + n_groups - 1) / n_groups); int start = tgpig * gs; int end = start + gs; start += tpitg; if (end >= ne) { end = ne; } float tmp = 0.0f; // partial sum for thread in warp for (int j = start; j < end; j += ntg) { tmp += src0[j]; } threadgroup_barrier(mem_flags::mem_threadgroup); tmp = simd_sum(tmp); if (ntg > N_SIMDWIDTH) { if (sgitg == 0) { buf[tiisg] = 0.0f; } threadgroup_barrier(mem_flags::mem_threadgroup); if (tiisg == 0) { buf[sgitg] = tmp; } threadgroup_barrier(mem_flags::mem_threadgroup); tmp = buf[tiisg]; tmp = simd_sum(tmp); } const float mean = tmp / gs; tmp = 0.0f; for (int j = start; j < end; j += ntg) { float xi = src0[j] - mean; dst[j] = xi; tmp += xi * xi; } tmp = simd_sum(tmp); if (ntg > N_SIMDWIDTH) { if (sgitg == 0) { buf[tiisg] = 0.0f; } threadgroup_barrier(mem_flags::mem_threadgroup); if (tiisg == 0) { buf[sgitg] = tmp; } threadgroup_barrier(mem_flags::mem_threadgroup); tmp = buf[tiisg]; tmp = simd_sum(tmp); } const float variance = tmp / gs; const float scale = 1.0f/sqrt(variance + eps); for (int j = start; j < end; j += ntg) { dst[j] *= scale; } } // function for calculate inner product between half a q4_0 block and 16 floats (yl), sumy is SUM(yl[i]) // il indicates where the q4 quants begin (0 or QK4_0/4) // we assume that the yl's have been multiplied with the appropriate scale factor // that corresponds to the missing bit shifts (1, 1/16, 1/256, 1/4096) inline float block_q_n_dot_y(device const block_q4_0 * qb_curr, float sumy, thread float * yl, int il) { float d = qb_curr->d; float2 acc = 0.f; device const uint16_t * qs = ((device const uint16_t *)qb_curr + 1 + il/2); for (int i = 0; i < 8; i+=2) { acc[0] += yl[i + 0] * (qs[i / 2] & 0x000F) + yl[i + 1] * (qs[i / 2] & 0x0F00); acc[1] += yl[i + 8] * (qs[i / 2] & 0x00F0) + yl[i + 9] * (qs[i / 2] & 0xF000); } return d * (sumy * -8.f + acc[0] + acc[1]); } // function for calculate inner product between half a q4_1 block and 16 floats (yl), sumy is SUM(yl[i]) // il indicates where the q4 quants begin (0 or QK4_0/4) // we assume that the yl's have been multiplied with the appropriate scale factor // that corresponds to the missing bit shifts (1, 1/16, 1/256, 1/4096) inline float block_q_n_dot_y(device const block_q4_1 * qb_curr, float sumy, thread float * yl, int il) 
{ float d = qb_curr->d; float m = qb_curr->m; float2 acc = 0.f; device const uint16_t * qs = ((device const uint16_t *)qb_curr + 2 + il/2); for (int i = 0; i < 8; i+=2) { acc[0] += yl[i + 0] * (qs[i / 2] & 0x000F) + yl[i + 1] * (qs[i / 2] & 0x0F00); acc[1] += yl[i + 8] * (qs[i / 2] & 0x00F0) + yl[i + 9] * (qs[i / 2] & 0xF000); } return d * (acc[0] + acc[1]) + sumy * m; } // function for calculate inner product between half a q5_0 block and 16 floats (yl), sumy is SUM(yl[i]) // il indicates where the q5 quants begin (0 or QK5_0/4) // we assume that the yl's have been multiplied with the appropriate scale factor // that corresponds to the missing bit shifts (1, 1/16, 1/256, 1/4096) inline float block_q_n_dot_y(device const block_q5_0 * qb_curr, float sumy, thread float * yl, int il) { float d = qb_curr->d; float2 acc = 0.f; device const uint16_t * qs = ((device const uint16_t *)qb_curr + 3 + il/2); const uint32_t qh = *((device const uint32_t *)qb_curr->qh); for (int i = 0; i < 8; i+=2) { acc[0] += yl[i + 0] * ((qs[i / 2] & 0x000F) | ((qh >> (i+0+il ) << 4 ) & 0x00010)) + yl[i + 1] * ((qs[i / 2] & 0x0F00) | ((qh >> (i+1+il ) << 12) & 0x01000)); acc[1] += yl[i + 8] * ((qs[i / 2] & 0x00F0) | ((qh >> (i+0+il+QK5_0/2) << 8 ) & 0x00100)) + yl[i + 9] * ((qs[i / 2] & 0xF000) | ((qh >> (i+1+il+QK5_0/2) << 16) & 0x10000)); } return d * (sumy * -16.f + acc[0] + acc[1]); } // function for calculate inner product between half a q5_1 block and 16 floats (yl), sumy is SUM(yl[i]) // il indicates where the q5 quants begin (0 or QK5_1/4) // we assume that the yl's have been multiplied with the appropriate scale factor // that corresponds to the missing bit shifts (1, 1/16, 1/256, 1/4096) inline float block_q_n_dot_y(device const block_q5_1 * qb_curr, float sumy, thread float * yl, int il) { float d = qb_curr->d; float m = qb_curr->m; float2 acc = 0.f; device const uint16_t * qs = ((device const uint16_t *)qb_curr + 4 + il/2); const uint32_t qh = *((device const uint32_t *)qb_curr->qh); for (int i = 0; i < 8; i+=2) { acc[0] += yl[i + 0] * ((qs[i / 2] & 0x000F) | ((qh >> (i+0+il ) << 4 ) & 0x00010)) + yl[i + 1] * ((qs[i / 2] & 0x0F00) | ((qh >> (i+1+il ) << 12) & 0x01000)); acc[1] += yl[i + 8] * ((qs[i / 2] & 0x00F0) | ((qh >> (i+0+il+QK5_0/2) << 8 ) & 0x00100)) + yl[i + 9] * ((qs[i / 2] & 0xF000) | ((qh >> (i+1+il+QK5_0/2) << 16) & 0x10000)); } return d * (acc[0] + acc[1]) + sumy * m; } // putting them in the kernel cause a significant performance penalty #define N_DST 4 // each SIMD group works on 4 rows #define N_SIMDGROUP 2 // number of SIMD groups in a thread group //Note: This is a template, but strictly speaking it only applies to // quantizations where the block size is 32. It also does not // guard against the number of rows not being divisible by // N_DST, so this is another explicit assumption of the implementation. 
template<typename block_q_type, int nr, int nsg, int nw> void mul_vec_q_n_f32_impl( device const void * src0, device const float * src1, device float * dst, int64_t ne00, int64_t ne01, int64_t ne02, int64_t ne10, int64_t ne12, int64_t ne0, int64_t ne1, uint r2, uint r3, uint3 tgpig, uint tiisg, uint sgitg) { const int nb = ne00/QK4_0; const int r0 = tgpig.x; const int r1 = tgpig.y; const int im = tgpig.z; const int first_row = (r0 * nsg + sgitg) * nr; const uint i12 = im%ne12; const uint i13 = im/ne12; const uint offset0 = first_row * nb + (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02); device const block_q_type * x = (device const block_q_type *) src0 + offset0; device const float * y = (device const float *) src1 + r1*ne10 + im*ne00*ne1; float yl[16]; // src1 vector cache float sumf[nr] = {0.f}; const int ix = (tiisg/2); const int il = (tiisg%2)*8; device const float * yb = y + ix * QK4_0 + il; // each thread in a SIMD group deals with half a block. for (int ib = ix; ib < nb; ib += nw/2) { float sumy = 0; for (int i = 0; i < 8; i += 2) { sumy += yb[i] + yb[i+1]; yl[i+0] = yb[i+ 0]; yl[i+1] = yb[i+ 1]/256.f; sumy += yb[i+16] + yb[i+17]; yl[i+8] = yb[i+16]/16.f; yl[i+9] = yb[i+17]/4096.f; } for (int row = 0; row < nr; row++) { sumf[row] += block_q_n_dot_y(x+ib+row*nb, sumy, yl, il); } yb += QK4_0 * 16; } for (int row = 0; row < nr; ++row) { const float tot = simd_sum(sumf[row]); if (tiisg == 0 && first_row + row < ne01) { dst[im*ne0*ne1 + r1*ne0 + first_row + row] = tot; } } } kernel void kernel_mul_mv_q4_0_f32( device const void * src0, device const float * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint & r2, constant uint & r3, uint3 tgpig[[threadgroup_position_in_grid]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { mul_vec_q_n_f32_impl<block_q4_0, N_DST, N_SIMDGROUP, N_SIMDWIDTH>(src0,src1,dst,ne00,ne01,ne02,ne10,ne12,ne0,ne1,r2,r3,tgpig,tiisg,sgitg); } kernel void kernel_mul_mv_q4_1_f32( device const void * src0, device const float * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint & r2, constant uint & r3, uint3 tgpig[[threadgroup_position_in_grid]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { mul_vec_q_n_f32_impl<block_q4_1, N_DST, N_SIMDGROUP, N_SIMDWIDTH>(src0,src1,dst,ne00,ne01,ne02,ne10,ne12,ne0,ne1,r2,r3,tgpig,tiisg,sgitg); } kernel void kernel_mul_mv_q5_0_f32( device const void * src0, device const float * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint & r2, constant uint & 
r3, uint3 tgpig[[threadgroup_position_in_grid]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { mul_vec_q_n_f32_impl<block_q5_0, N_DST, N_SIMDGROUP, N_SIMDWIDTH>(src0,src1,dst,ne00,ne01,ne02,ne10,ne12,ne0,ne1,r2,r3,tgpig,tiisg,sgitg); } kernel void kernel_mul_mv_q5_1_f32( device const void * src0, device const float * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint & r2, constant uint & r3, uint3 tgpig[[threadgroup_position_in_grid]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { mul_vec_q_n_f32_impl<block_q5_1, N_DST, N_SIMDGROUP, N_SIMDWIDTH>(src0,src1,dst,ne00,ne01,ne02,ne10,ne12,ne0,ne1,r2,r3,tgpig,tiisg,sgitg); } #define NB_Q8_0 8 void kernel_mul_mv_q8_0_f32_impl( device const void * src0, device const float * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant int64_t & ne10, constant int64_t & ne12, constant int64_t & ne0, constant int64_t & ne1, constant uint & r2, constant uint & r3, uint3 tgpig[[threadgroup_position_in_grid]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { const int nr = N_DST; const int nsg = N_SIMDGROUP; const int nw = N_SIMDWIDTH; const int nb = ne00/QK8_0; const int r0 = tgpig.x; const int r1 = tgpig.y; const int im = tgpig.z; const int first_row = (r0 * nsg + sgitg) * nr; const uint i12 = im%ne12; const uint i13 = im/ne12; const uint offset0 = first_row * nb + (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02); device const block_q8_0 * x = (device const block_q8_0 *) src0 + offset0; device const float * y = (device const float *) src1 + r1*ne10 + im*ne00*ne1; float yl[NB_Q8_0]; float sumf[nr]={0.f}; const int ix = tiisg/4; const int il = tiisg%4; device const float * yb = y + ix * QK8_0 + NB_Q8_0*il; // each thread in a SIMD group deals with NB_Q8_0 quants at a time for (int ib = ix; ib < nb; ib += nw/4) { for (int i = 0; i < NB_Q8_0; ++i) { yl[i] = yb[i]; } for (int row = 0; row < nr; row++) { device const int8_t * qs = x[ib+row*nb].qs + NB_Q8_0*il; float sumq = 0.f; for (int iq = 0; iq < NB_Q8_0; ++iq) { sumq += qs[iq] * yl[iq]; } sumf[row] += sumq*x[ib+row*nb].d; } yb += NB_Q8_0 * nw; } for (int row = 0; row < nr; ++row) { const float tot = simd_sum(sumf[row]); if (tiisg == 0 && first_row + row < ne01) { dst[r1*ne0 + im*ne0*ne1 + first_row + row] = tot; } } } [[host_name("kernel_mul_mv_q8_0_f32")]] kernel void kernel_mul_mv_q8_0_f32( device const void * src0, device const float * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint & r2, constant uint & r3, uint3 tgpig[[threadgroup_position_in_grid]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { kernel_mul_mv_q8_0_f32_impl(src0,src1,dst,ne00,ne01,ne02,ne10,ne12,ne0,ne1,r2,r3,tgpig,tiisg,sgitg); } #define 
N_F32_F32 4 void kernel_mul_mv_f32_f32_impl( device const char * src0, device const char * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint & r2, constant uint & r3, uint3 tgpig[[threadgroup_position_in_grid]], uint tiisg[[thread_index_in_simdgroup]]) { const int64_t r0 = tgpig.x; const int64_t rb = tgpig.y*N_F32_F32; const int64_t im = tgpig.z; const uint i12 = im%ne12; const uint i13 = im/ne12; const uint offset0 = r0*nb01 + (i12/r2)*nb02 + (i13/r3)*nb02*ne02; device const float * x = (device const float *) (src0 + offset0); if (ne00 < 128) { for (int row = 0; row < N_F32_F32; ++row) { int r1 = rb + row; if (r1 >= ne11) { break; } device const float * y = (device const float *) (src1 + r1*nb11 + im*nb12); float sumf = 0; for (int i = tiisg; i < ne00; i += 32) { sumf += (float) x[i] * (float) y[i]; } float all_sum = simd_sum(sumf); if (tiisg == 0) { dst[im*ne1*ne0 + r1*ne0 + r0] = all_sum; } } } else { device const float4 * x4 = (device const float4 *)x; for (int row = 0; row < N_F32_F32; ++row) { int r1 = rb + row; if (r1 >= ne11) { break; } device const float * y = (device const float *) (src1 + r1*nb11 + im*nb12); device const float4 * y4 = (device const float4 *) y; float sumf = 0; for (int i = tiisg; i < ne00/4; i += 32) { for (int k = 0; k < 4; ++k) sumf += (float) x4[i][k] * y4[i][k]; } float all_sum = simd_sum(sumf); if (tiisg == 0) { for (int i = 4*(ne00/4); i < ne00; ++i) all_sum += (float) x[i] * y[i]; dst[im*ne1*ne0 + r1*ne0 + r0] = all_sum; } } } } [[host_name("kernel_mul_mv_f32_f32")]] kernel void kernel_mul_mv_f32_f32( device const char * src0, device const char * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint & r2, constant uint & r3, uint3 tgpig[[threadgroup_position_in_grid]], uint tiisg[[thread_index_in_simdgroup]]) { kernel_mul_mv_f32_f32_impl(src0, src1, dst, ne00, ne01, ne02, nb00, nb01, nb02, ne10, ne11, ne12, nb10, nb11, nb12, ne0, ne1, r2, r3, tgpig, tiisg); } #define N_F16_F16 4 kernel void kernel_mul_mv_f16_f16( device const char * src0, device const char * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint & r2, constant uint & r3, uint3 tgpig[[threadgroup_position_in_grid]], uint tiisg[[thread_index_in_simdgroup]]) { const int64_t r0 = tgpig.x; const int64_t rb = tgpig.y*N_F16_F16; const int64_t im = tgpig.z; const uint i12 = im%ne12; const uint i13 = im/ne12; const uint offset0 = r0*nb01 + (i12/r2)*nb02 + (i13/r3)*nb02*ne02; device const half * x = (device const half *) (src0 + offset0); if (ne00 < 128) { for (int row = 0; row < N_F16_F16; 
++row) { int r1 = rb + row; if (r1 >= ne11) { break; } device const half * y = (device const half *) (src1 + r1*nb11 + im*nb12); float sumf = 0; for (int i = tiisg; i < ne00; i += 32) { sumf += (half) x[i] * (half) y[i]; } float all_sum = simd_sum(sumf); if (tiisg == 0) { dst[im*ne1*ne0 + r1*ne0 + r0] = all_sum; } } } else { device const half4 * x4 = (device const half4 *)x; for (int row = 0; row < N_F16_F16; ++row) { int r1 = rb + row; if (r1 >= ne11) { break; } device const half * y = (device const half *) (src1 + r1*nb11 + im*nb12); device const half4 * y4 = (device const half4 *) y; float sumf = 0; for (int i = tiisg; i < ne00/4; i += 32) { for (int k = 0; k < 4; ++k) sumf += (half) x4[i][k] * y4[i][k]; } float all_sum = simd_sum(sumf); if (tiisg == 0) { for (int i = 4*(ne00/4); i < ne00; ++i) all_sum += (half) x[i] * y[i]; dst[im*ne1*ne0 + r1*ne0 + r0] = all_sum; } } } } void kernel_mul_mv_f16_f32_1row_impl( device const char * src0, device const char * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint & r2, constant uint & r3, uint3 tgpig[[threadgroup_position_in_grid]], uint tiisg[[thread_index_in_simdgroup]]) { const int64_t r0 = tgpig.x; const int64_t r1 = tgpig.y; const int64_t im = tgpig.z; const uint i12 = im%ne12; const uint i13 = im/ne12; const uint offset0 = r0*nb01 + (i12/r2)*nb02 + (i13/r3)*nb02*ne02; device const half * x = (device const half *) (src0 + offset0); device const float * y = (device const float *) (src1 + r1*nb11 + im*nb12); float sumf = 0; if (ne00 < 128) { for (int i = tiisg; i < ne00; i += 32) { sumf += (float) x[i] * (float) y[i]; } float all_sum = simd_sum(sumf); if (tiisg == 0) { dst[im*ne1*ne0 + r1*ne0 + r0] = all_sum; } } else { device const half4 * x4 = (device const half4 *) x; device const float4 * y4 = (device const float4 *) y; for (int i = tiisg; i < ne00/4; i += 32) { for (int k = 0; k < 4; ++k) sumf += (float)x4[i][k] * y4[i][k]; } float all_sum = simd_sum(sumf); if (tiisg == 0) { for (int i = 4*(ne00/4); i < ne00; ++i) all_sum += (float) x[i] * y[i]; dst[im*ne1*ne0 + r1*ne0 + r0] = all_sum; } } } [[host_name("kernel_mul_mv_f16_f32_1row")]] kernel void kernel_mul_mv_f16_f32_1row( device const char * src0, device const char * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint & r2, constant uint & r3, uint3 tgpig[[threadgroup_position_in_grid]], uint tiisg[[thread_index_in_simdgroup]]) { kernel_mul_mv_f16_f32_1row_impl(src0, src1, dst, ne00, ne01, ne02, nb00, nb01, nb02, ne10, ne11, ne12, nb10, nb11, nb12, ne0, ne1, r2, r3, tgpig, tiisg); } #define N_F16_F32 4 void kernel_mul_mv_f16_f32_impl( device const char * src0, device const char * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant int64_t & ne11, constant 
int64_t & ne12, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint & r2, constant uint & r3, uint3 tgpig[[threadgroup_position_in_grid]], uint tiisg[[thread_index_in_simdgroup]]) { const int64_t r0 = tgpig.x; const int64_t rb = tgpig.y*N_F16_F32; const int64_t im = tgpig.z; const uint i12 = im%ne12; const uint i13 = im/ne12; const uint offset0 = r0*nb01 + (i12/r2)*nb02 + (i13/r3)*nb02*ne02; device const half * x = (device const half *) (src0 + offset0); if (ne00 < 128) { for (int row = 0; row < N_F16_F32; ++row) { int r1 = rb + row; if (r1 >= ne11) { break; } device const float * y = (device const float *) (src1 + r1*nb11 + im*nb12); float sumf = 0; for (int i = tiisg; i < ne00; i += 32) { sumf += (float) x[i] * (float) y[i]; } float all_sum = simd_sum(sumf); if (tiisg == 0) { dst[im*ne1*ne0 + r1*ne0 + r0] = all_sum; } } } else { device const half4 * x4 = (device const half4 *)x; for (int row = 0; row < N_F16_F32; ++row) { int r1 = rb + row; if (r1 >= ne11) { break; } device const float * y = (device const float *) (src1 + r1*nb11 + im*nb12); device const float4 * y4 = (device const float4 *) y; float sumf = 0; for (int i = tiisg; i < ne00/4; i += 32) { for (int k = 0; k < 4; ++k) sumf += (float) x4[i][k] * y4[i][k]; } float all_sum = simd_sum(sumf); if (tiisg == 0) { for (int i = 4*(ne00/4); i < ne00; ++i) all_sum += (float) x[i] * y[i]; dst[im*ne1*ne0 + r1*ne0 + r0] = all_sum; } } } } [[host_name("kernel_mul_mv_f16_f32")]] kernel void kernel_mul_mv_f16_f32( device const char * src0, device const char * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint & r2, constant uint & r3, uint3 tgpig[[threadgroup_position_in_grid]], uint tiisg[[thread_index_in_simdgroup]]) { kernel_mul_mv_f16_f32_impl(src0, src1, dst, ne00, ne01, ne02, nb00, nb01, nb02, ne10, ne11, ne12, nb10, nb11, nb12, ne0, ne1, r2, r3, tgpig, tiisg); } // Assumes row size (ne00) is a multiple of 4 kernel void kernel_mul_mv_f16_f32_l4( device const char * src0, device const char * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint & r2, constant uint & r3, uint3 tgpig[[threadgroup_position_in_grid]], uint tiisg[[thread_index_in_simdgroup]]) { const int nrows = ne11; const int64_t r0 = tgpig.x; const int64_t im = tgpig.z; const uint i12 = im%ne12; const uint i13 = im/ne12; const uint offset0 = r0*nb01 + (i12/r2)*nb02 + (i13/r3)*nb02*ne02; device const half4 * x4 = (device const half4 *) (src0 + offset0); for (int r1 = 0; r1 < nrows; ++r1) { device const float4 * y4 = (device const float4 *) (src1 + r1*nb11 + im*nb12); float sumf = 0; for (int i = tiisg; i < ne00/4; i += 32) { for (int k = 0; k < 4; ++k) sumf += (float) x4[i][k] * y4[i][k]; } float all_sum = simd_sum(sumf); if (tiisg == 0) { dst[im*ne1*ne0 + r1*ne0 + r0] = all_sum; } } } kernel void kernel_alibi_f32( 
device const float * src0, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant int64_t & ne03, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant uint64_t & nb03, constant int64_t & ne0, constant int64_t & ne1, constant int64_t & ne2, constant int64_t & ne3, constant uint64_t & nb0, constant uint64_t & nb1, constant uint64_t & nb2, constant uint64_t & nb3, constant float & m0, constant float & m1, constant int & n_heads_log2_floor, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tpitg[[thread_position_in_threadgroup]], uint3 ntg[[threads_per_threadgroup]]) { const int64_t i03 = tgpig[2]; const int64_t i02 = tgpig[1]; const int64_t i01 = tgpig[0]; const int64_t n = i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00; const int64_t i3 = n / (ne2*ne1*ne0); const int64_t i2 = (n - i3*ne2*ne1*ne0) / (ne1*ne0); const int64_t i1 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0) / ne0; //const int64_t i0 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0 - i1*ne0); const int64_t k = i3*ne3 + i2; float m_k; if (k < n_heads_log2_floor) { m_k = pow(m0, k + 1); } else { m_k = pow(m1, 2 * (k - n_heads_log2_floor) + 1); } device char * dst_row = (device char *) dst + i3*nb3 + i2*nb2 + i1*nb1; device const char * src_row = (device char *) src0 + i03*nb03 + i02*nb02 + i01*nb01; for (int64_t i00 = tpitg.x; i00 < ne00; i00 += ntg.x) { const float src_v = *(device float *)(src_row + i00*nb00); device float * dst_v = (device float *)(dst_row + i00*nb0); *dst_v = i00 * m_k + src_v; } } static float rope_yarn_ramp(const float low, const float high, const int i0) { const float y = (i0 / 2 - low) / max(0.001f, high - low); return 1.0f - min(1.0f, max(0.0f, y)); } // YaRN algorithm based on LlamaYaRNScaledRotaryEmbedding.py from https://github.com/jquesnelle/yarn // MIT licensed. Copyright (c) 2023 Jeffrey Quesnelle and Bowen Peng. 
static void rope_yarn( float theta_extrap, float freq_scale, float corr_dims[2], int64_t i0, float ext_factor, float mscale, thread float * cos_theta, thread float * sin_theta ) { // Get n-d rotational scaling corrected for extrapolation float theta_interp = freq_scale * theta_extrap; float theta = theta_interp; if (ext_factor != 0.0f) { float ramp_mix = rope_yarn_ramp(corr_dims[0], corr_dims[1], i0) * ext_factor; theta = theta_interp * (1 - ramp_mix) + theta_extrap * ramp_mix; // Get n-d magnitude scaling corrected for interpolation mscale *= 1.0f + 0.1f * log(1.0f / freq_scale); } *cos_theta = cos(theta) * mscale; *sin_theta = sin(theta) * mscale; } // Apparently solving `n_rot = 2pi * x * base^((2 * max_pos_emb) / n_dims)` for x, we get // `corr_fac(n_rot) = n_dims * log(max_pos_emb / (n_rot * 2pi)) / (2 * log(base))` static float rope_yarn_corr_factor(int n_dims, int n_orig_ctx, float n_rot, float base) { return n_dims * log(n_orig_ctx / (n_rot * 2 * M_PI_F)) / (2 * log(base)); } static void rope_yarn_corr_dims( int n_dims, int n_orig_ctx, float freq_base, float beta_fast, float beta_slow, float dims[2] ) { // start and end correction dims dims[0] = max(0.0f, floor(rope_yarn_corr_factor(n_dims, n_orig_ctx, beta_fast, freq_base))); dims[1] = min(n_dims - 1.0f, ceil(rope_yarn_corr_factor(n_dims, n_orig_ctx, beta_slow, freq_base))); } typedef void (rope_t)( device const void * src0, device const int32_t * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant int64_t & ne03, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant uint64_t & nb03, constant int64_t & ne0, constant int64_t & ne1, constant int64_t & ne2, constant int64_t & ne3, constant uint64_t & nb0, constant uint64_t & nb1, constant uint64_t & nb2, constant uint64_t & nb3, constant int & n_past, constant int & n_dims, constant int & mode, constant int & n_orig_ctx, constant float & freq_base, constant float & freq_scale, constant float & ext_factor, constant float & attn_factor, constant float & beta_fast, constant float & beta_slow, uint tiitg[[thread_index_in_threadgroup]], uint3 tptg[[threads_per_threadgroup]], uint3 tgpig[[threadgroup_position_in_grid]]); template<typename T> kernel void kernel_rope( device const void * src0, device const int32_t * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant int64_t & ne03, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant uint64_t & nb03, constant int64_t & ne0, constant int64_t & ne1, constant int64_t & ne2, constant int64_t & ne3, constant uint64_t & nb0, constant uint64_t & nb1, constant uint64_t & nb2, constant uint64_t & nb3, constant int & n_past, constant int & n_dims, constant int & mode, constant int & n_orig_ctx, constant float & freq_base, constant float & freq_scale, constant float & ext_factor, constant float & attn_factor, constant float & beta_fast, constant float & beta_slow, uint tiitg[[thread_index_in_threadgroup]], uint3 tptg[[threads_per_threadgroup]], uint3 tgpig[[threadgroup_position_in_grid]]) { const int64_t i3 = tgpig[2]; const int64_t i2 = tgpig[1]; const int64_t i1 = tgpig[0]; const bool is_neox = mode & 2; float corr_dims[2]; rope_yarn_corr_dims(n_dims, n_orig_ctx, freq_base, beta_fast, beta_slow, corr_dims); device const int32_t * pos = src1; const int64_t p = pos[i2]; const float theta_0 = (float)p; const float inv_ndims = -1.f/n_dims; if (!is_neox) { for (int64_t i0 = 
2*tiitg; i0 < ne0; i0 += 2*tptg.x) { const float theta = theta_0 * pow(freq_base, inv_ndims*i0); float cos_theta, sin_theta; rope_yarn(theta, freq_scale, corr_dims, i0, ext_factor, attn_factor, &cos_theta, &sin_theta); device const T * const src = (device T *)((device char *) src0 + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00); device T * dst_data = (device T *)((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); const T x0 = src[0]; const T x1 = src[1]; dst_data[0] = x0*cos_theta - x1*sin_theta; dst_data[1] = x0*sin_theta + x1*cos_theta; } } else { for (int64_t ic = 2*tiitg; ic < ne0; ic += 2*tptg.x) { if (ic < n_dims) { const int64_t ib = 0; // simplified from `(ib * n_dims + ic) * inv_ndims` const float cur_rot = inv_ndims*ic - ib; const float theta = theta_0 * pow(freq_base, cur_rot); float cos_theta, sin_theta; rope_yarn(theta, freq_scale, corr_dims, cur_rot, ext_factor, attn_factor, &cos_theta, &sin_theta); const int64_t i0 = ib*n_dims + ic/2; device const T * const src = (device T *)((device char *) src0 + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00); device T * dst_data = (device T *)((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); const float x0 = src[0]; const float x1 = src[n_dims/2]; dst_data[0] = x0*cos_theta - x1*sin_theta; dst_data[n_dims/2] = x0*sin_theta + x1*cos_theta; } else { const int64_t i0 = ic; device const T * const src = (device T *)((device char *) src0 + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00); device T * dst_data = (device T *)((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); dst_data[0] = src[0]; dst_data[1] = src[1]; } } } } template [[host_name("kernel_rope_f32")]] kernel rope_t kernel_rope<float>; template [[host_name("kernel_rope_f16")]] kernel rope_t kernel_rope<half>; kernel void kernel_im2col_f16( device const float * x, device half * dst, constant int32_t & ofs0, constant int32_t & ofs1, constant int32_t & IW, constant int32_t & IH, constant int32_t & CHW, constant int32_t & s0, constant int32_t & s1, constant int32_t & p0, constant int32_t & p1, constant int32_t & d0, constant int32_t & d1, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tgpg[[threadgroups_per_grid]], uint3 tpitg[[thread_position_in_threadgroup]], uint3 ntg[[threads_per_threadgroup]]) { const int32_t iiw = tgpig[2] * s0 + tpitg[2] * d0 - p0; const int32_t iih = tgpig[1] * s1 + tpitg[1] * d1 - p1; const int32_t offset_dst = (tpitg[0] * tgpg[1] * tgpg[2] + tgpig[1] * tgpg[2] + tgpig[2]) * CHW + (tgpig[0] * (ntg[1] * ntg[2]) + tpitg[1] * ntg[2] + tpitg[2]); if (iih < 0 || iih >= IH || iiw < 0 || iiw >= IW) { dst[offset_dst] = 0.0f; } else { const int32_t offset_src = tpitg[0] * ofs0 + tgpig[0] * ofs1; dst[offset_dst] = x[offset_src + iih * IW + iiw]; } } kernel void kernel_upscale_f32( device const char * src0, device char * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant int64_t & ne03, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant uint64_t & nb03, constant int64_t & ne0, constant int64_t & ne1, constant int64_t & ne2, constant int64_t & ne3, constant uint64_t & nb0, constant uint64_t & nb1, constant uint64_t & nb2, constant uint64_t & nb3, constant int32_t & sf, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tpitg[[thread_position_in_threadgroup]], uint3 ntg[[threads_per_threadgroup]]) { const int64_t i3 = tgpig.z; const int64_t i2 = tgpig.y; const int64_t i1 = tgpig.x; const int64_t i03 = i3; const int64_t i02 = i2; const int64_t i01 = i1/sf; device const float * src0_ptr = (device 
const float *) (src0 + i03*nb03 + i02*nb02 + i01*nb01); device float * dst_ptr = (device float *) (dst + i3*nb3 + i2*nb2 + i1*nb1); for (int i0 = tpitg.x; i0 < ne0; i0 += ntg.x) { dst_ptr[i0] = src0_ptr[i0/sf]; } } kernel void kernel_pad_f32( device const char * src0, device char * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant int64_t & ne03, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant uint64_t & nb03, constant int64_t & ne0, constant int64_t & ne1, constant int64_t & ne2, constant int64_t & ne3, constant uint64_t & nb0, constant uint64_t & nb1, constant uint64_t & nb2, constant uint64_t & nb3, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tpitg[[thread_position_in_threadgroup]], uint3 ntg[[threads_per_threadgroup]]) { const int64_t i3 = tgpig.z; const int64_t i2 = tgpig.y; const int64_t i1 = tgpig.x; const int64_t i03 = i3; const int64_t i02 = i2; const int64_t i01 = i1; device const float * src0_ptr = (device const float *) (src0 + i03*nb03 + i02*nb02 + i01*nb01); device float * dst_ptr = (device float *) (dst + i3*nb3 + i2*nb2 + i1*nb1); if (i1 < ne01 && i2 < ne02 && i3 < ne03) { for (int i0 = tpitg.x; i0 < ne0; i0 += ntg.x) { if (i0 < ne00) { dst_ptr[i0] = src0_ptr[i0]; } else { dst_ptr[i0] = 0.0f; } } return; } for (int i0 = tpitg.x; i0 < ne0; i0 += ntg.x) { dst_ptr[i0] = 0.0f; } } // bitonic sort implementation following the CUDA kernels as reference typedef void (argsort_t)( device const float * x, device int32_t * dst, constant int64_t & ncols, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tpitg[[thread_position_in_threadgroup]]); template<ggml_sort_order order> kernel void kernel_argsort_f32_i32( device const float * x, device int32_t * dst, constant int64_t & ncols, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tpitg[[thread_position_in_threadgroup]]) { // bitonic sort int col = tpitg[0]; int row = tgpig[1]; if (col >= ncols) return; device const float * x_row = x + row * ncols; device int32_t * dst_row = dst + row * ncols; // initialize indices if (col < ncols) { dst_row[col] = col; } threadgroup_barrier(mem_flags::mem_threadgroup); for (int k = 2; k <= ncols; k *= 2) { for (int j = k / 2; j > 0; j /= 2) { int ixj = col ^ j; if (ixj > col) { if ((col & k) == 0) { if (order == GGML_SORT_ASC ? x_row[dst_row[col]] > x_row[dst_row[ixj]] : x_row[dst_row[col]] < x_row[dst_row[ixj]]) { SWAP(dst_row[col], dst_row[ixj]); } } else { if (order == GGML_SORT_ASC ? x_row[dst_row[col]] < x_row[dst_row[ixj]] : x_row[dst_row[col]] > x_row[dst_row[ixj]]) { SWAP(dst_row[col], dst_row[ixj]); } } } threadgroup_barrier(mem_flags::mem_threadgroup); } } } template [[host_name("kernel_argsort_f32_i32_asc")]] kernel argsort_t kernel_argsort_f32_i32<GGML_SORT_ASC>; template [[host_name("kernel_argsort_f32_i32_desc")]] kernel argsort_t kernel_argsort_f32_i32<GGML_SORT_DESC>; kernel void kernel_leaky_relu_f32( device const float * src0, device float * dst, constant float & slope, uint tpig[[thread_position_in_grid]]) { dst[tpig] = src0[tpig] > 0.0f ? 
src0[tpig] : src0[tpig] * slope; } kernel void kernel_cpy_f16_f16( device const half * src0, device half * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant int64_t & ne03, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant uint64_t & nb03, constant int64_t & ne0, constant int64_t & ne1, constant int64_t & ne2, constant int64_t & ne3, constant uint64_t & nb0, constant uint64_t & nb1, constant uint64_t & nb2, constant uint64_t & nb3, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tpitg[[thread_position_in_threadgroup]], uint3 ntg[[threads_per_threadgroup]]) { const int64_t i03 = tgpig[2]; const int64_t i02 = tgpig[1]; const int64_t i01 = tgpig[0]; const int64_t n = i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00; const int64_t i3 = n / (ne2*ne1*ne0); const int64_t i2 = (n - i3*ne2*ne1*ne0) / (ne1*ne0); const int64_t i1 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0) / ne0; const int64_t i0 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0 - i1*ne0); device half * dst_data = (device half *) ((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); for (int64_t i00 = tpitg.x; i00 < ne00; i00 += ntg.x) { device const half * src = (device half *)((device char *) src0 + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00); dst_data[i00] = src[0]; } } kernel void kernel_cpy_f16_f32( device const half * src0, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant int64_t & ne03, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant uint64_t & nb03, constant int64_t & ne0, constant int64_t & ne1, constant int64_t & ne2, constant int64_t & ne3, constant uint64_t & nb0, constant uint64_t & nb1, constant uint64_t & nb2, constant uint64_t & nb3, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tpitg[[thread_position_in_threadgroup]], uint3 ntg[[threads_per_threadgroup]]) { const int64_t i03 = tgpig[2]; const int64_t i02 = tgpig[1]; const int64_t i01 = tgpig[0]; const int64_t n = i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00; const int64_t i3 = n / (ne2*ne1*ne0); const int64_t i2 = (n - i3*ne2*ne1*ne0) / (ne1*ne0); const int64_t i1 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0) / ne0; const int64_t i0 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0 - i1*ne0); device float * dst_data = (device float *) ((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); for (int64_t i00 = tpitg.x; i00 < ne00; i00 += ntg.x) { device const half * src = (device half *)((device char *) src0 + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00); dst_data[i00] = src[0]; } } kernel void kernel_cpy_f32_f16( device const float * src0, device half * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant int64_t & ne03, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant uint64_t & nb03, constant int64_t & ne0, constant int64_t & ne1, constant int64_t & ne2, constant int64_t & ne3, constant uint64_t & nb0, constant uint64_t & nb1, constant uint64_t & nb2, constant uint64_t & nb3, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tpitg[[thread_position_in_threadgroup]], uint3 ntg[[threads_per_threadgroup]]) { const int64_t i03 = tgpig[2]; const int64_t i02 = tgpig[1]; const int64_t i01 = tgpig[0]; const int64_t n = i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00; const int64_t i3 = n / (ne2*ne1*ne0); const int64_t i2 = (n - i3*ne2*ne1*ne0) / (ne1*ne0); const int64_t i1 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0) / ne0; const int64_t i0 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0 - 
i1*ne0); device half * dst_data = (device half *) ((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); for (int64_t i00 = tpitg.x; i00 < ne00; i00 += ntg.x) { device const float * src = (device float *)((device char *) src0 + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00); dst_data[i00] = src[0]; } } kernel void kernel_cpy_f32_f32( device const float * src0, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant int64_t & ne03, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant uint64_t & nb03, constant int64_t & ne0, constant int64_t & ne1, constant int64_t & ne2, constant int64_t & ne3, constant uint64_t & nb0, constant uint64_t & nb1, constant uint64_t & nb2, constant uint64_t & nb3, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tpitg[[thread_position_in_threadgroup]], uint3 ntg[[threads_per_threadgroup]]) { const int64_t i03 = tgpig[2]; const int64_t i02 = tgpig[1]; const int64_t i01 = tgpig[0]; const int64_t n = i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00; const int64_t i3 = n / (ne2*ne1*ne0); const int64_t i2 = (n - i3*ne2*ne1*ne0) / (ne1*ne0); const int64_t i1 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0) / ne0; const int64_t i0 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0 - i1*ne0); device float * dst_data = (device float *) ((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); for (int64_t i00 = tpitg.x; i00 < ne00; i00 += ntg.x) { device const float * src = (device float *)((device char *) src0 + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00); dst_data[i00] = src[0]; } } kernel void kernel_cpy_f32_q8_0( device const float * src0, device void * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant int64_t & ne03, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant uint64_t & nb03, constant int64_t & ne0, constant int64_t & ne1, constant int64_t & ne2, constant int64_t & ne3, constant uint64_t & nb0, constant uint64_t & nb1, constant uint64_t & nb2, constant uint64_t & nb3, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tpitg[[thread_position_in_threadgroup]], uint3 ntg[[threads_per_threadgroup]]) { const int64_t i03 = tgpig[2]; const int64_t i02 = tgpig[1]; const int64_t i01 = tgpig[0]; const int64_t n = i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00; const int64_t i3 = n / (ne2*ne1*ne0); const int64_t i2 = (n - i3*ne2*ne1*ne0) / (ne1*ne0); const int64_t i1 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0) / ne0; const int64_t i0 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0 - i1*ne0)/QK8_0; device block_q8_0 * dst_data = (device block_q8_0 *) ((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); for (int64_t i00 = tpitg.x*QK8_0; i00 < ne00; i00 += ntg.x*QK8_0) { device const float * src = (device float *)((device char *) src0 + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00); float amax = 0.0f; // absolute max for (int j = 0; j < QK8_0; j++) { const float v = src[j]; amax = MAX(amax, fabs(v)); } const float d = amax / ((1 << 7) - 1); const float id = d ? 
1.0f/d : 0.0f; dst_data[i00/QK8_0].d = d; for (int j = 0; j < QK8_0; ++j) { const float x0 = src[j]*id; dst_data[i00/QK8_0].qs[j] = round(x0); } } } kernel void kernel_cpy_f32_q4_0( device const float * src0, device void * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant int64_t & ne03, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant uint64_t & nb03, constant int64_t & ne0, constant int64_t & ne1, constant int64_t & ne2, constant int64_t & ne3, constant uint64_t & nb0, constant uint64_t & nb1, constant uint64_t & nb2, constant uint64_t & nb3, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tpitg[[thread_position_in_threadgroup]], uint3 ntg[[threads_per_threadgroup]]) { const int64_t i03 = tgpig[2]; const int64_t i02 = tgpig[1]; const int64_t i01 = tgpig[0]; const int64_t n = i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00; const int64_t i3 = n / (ne2*ne1*ne0); const int64_t i2 = (n - i3*ne2*ne1*ne0) / (ne1*ne0); const int64_t i1 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0) / ne0; const int64_t i0 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0 - i1*ne0)/QK4_0; device block_q4_0 * dst_data = (device block_q4_0 *) ((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); for (int64_t i00 = tpitg.x*QK4_0; i00 < ne00; i00 += ntg.x*QK4_0) { device const float * src = (device float *)((device char *) src0 + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00); float amax = 0.0f; // absolute max float max = 0.0f; for (int j = 0; j < QK4_0; j++) { const float v = src[j]; if (amax < fabs(v)) { amax = fabs(v); max = v; } } const float d = max / -8; const float id = d ? 1.0f/d : 0.0f; dst_data[i00/QK4_0].d = d; for (int j = 0; j < QK4_0/2; ++j) { const float x0 = src[0 + j]*id; const float x1 = src[QK4_0/2 + j]*id; const uint8_t xi0 = MIN(15, (int8_t)(x0 + 8.5f)); const uint8_t xi1 = MIN(15, (int8_t)(x1 + 8.5f)); dst_data[i00/QK4_0].qs[j] = xi0; dst_data[i00/QK4_0].qs[j] |= xi1 << 4; } } } kernel void kernel_cpy_f32_q4_1( device const float * src0, device void * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant int64_t & ne03, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant uint64_t & nb03, constant int64_t & ne0, constant int64_t & ne1, constant int64_t & ne2, constant int64_t & ne3, constant uint64_t & nb0, constant uint64_t & nb1, constant uint64_t & nb2, constant uint64_t & nb3, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tpitg[[thread_position_in_threadgroup]], uint3 ntg[[threads_per_threadgroup]]) { const int64_t i03 = tgpig[2]; const int64_t i02 = tgpig[1]; const int64_t i01 = tgpig[0]; const int64_t n = i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00; const int64_t i3 = n / (ne2*ne1*ne0); const int64_t i2 = (n - i3*ne2*ne1*ne0) / (ne1*ne0); const int64_t i1 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0) / ne0; const int64_t i0 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0 - i1*ne0)/QK4_1; device block_q4_1 * dst_data = (device block_q4_1 *) ((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); for (int64_t i00 = tpitg.x*QK4_1; i00 < ne00; i00 += ntg.x*QK4_1) { device const float * src = (device float *)((device char *) src0 + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00); float min = FLT_MAX; float max = -FLT_MAX; for (int j = 0; j < QK4_1; j++) { const float v = src[j]; if (min > v) min = v; if (max < v) max = v; } const float d = (max - min) / ((1 << 4) - 1); const float id = d ? 
1.0f/d : 0.0f; dst_data[i00/QK4_1].d = d; dst_data[i00/QK4_1].m = min; for (int j = 0; j < QK4_1/2; ++j) { const float x0 = (src[0 + j] - min)*id; const float x1 = (src[QK4_1/2 + j] - min)*id; const uint8_t xi0 = MIN(15, (int8_t)(x0 + 0.5f)); const uint8_t xi1 = MIN(15, (int8_t)(x1 + 0.5f)); dst_data[i00/QK4_1].qs[j] = xi0; dst_data[i00/QK4_1].qs[j] |= xi1 << 4; } } } kernel void kernel_concat( device const char * src0, device const char * src1, device char * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant int64_t & ne03, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant uint64_t & nb03, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant int64_t & ne13, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant uint64_t & nb13, constant int64_t & ne0, constant int64_t & ne1, constant int64_t & ne2, constant int64_t & ne3, constant uint64_t & nb0, constant uint64_t & nb1, constant uint64_t & nb2, constant uint64_t & nb3, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tpitg[[thread_position_in_threadgroup]], uint3 ntg[[threads_per_threadgroup]]) { const int64_t i03 = tgpig.z; const int64_t i02 = tgpig.y; const int64_t i01 = tgpig.x; const int64_t i13 = i03 % ne13; const int64_t i12 = i02 % ne12; const int64_t i11 = i01 % ne11; device const char * src0_ptr = src0 + i03*nb03 + i02*nb02 + i01*nb01 + tpitg.x*nb00; device const char * src1_ptr = src1 + i13*nb13 + i12*nb12 + i11*nb11 + tpitg.x*nb10; device char * dst_ptr = dst + i03*nb3 + i02*nb2 + i01*nb1 + tpitg.x*nb0; for (int i0 = tpitg.x; i0 < ne0; i0 += ntg.x) { if (i02 < ne02) { ((device float *)dst_ptr)[0] = ((device float *)src0_ptr)[0]; src0_ptr += ntg.x*nb00; } else { ((device float *)dst_ptr)[0] = ((device float *)src1_ptr)[0]; src1_ptr += ntg.x*nb10; } dst_ptr += ntg.x*nb0; } } //============================================ k-quants ====================================================== #ifndef QK_K #define QK_K 256 #else static_assert(QK_K == 256 || QK_K == 64, "QK_K must be 256 or 64"); #endif #if QK_K == 256 #define K_SCALE_SIZE 12 #else #define K_SCALE_SIZE 4 #endif typedef struct { uint8_t scales[QK_K/16]; // scales and mins, quantized with 4 bits uint8_t qs[QK_K/4]; // quants half d; // super-block scale for quantized scales half dmin; // super-block scale for quantized mins } block_q2_K; // 84 bytes / block typedef struct { uint8_t hmask[QK_K/8]; // quants - high bit uint8_t qs[QK_K/4]; // quants - low 2 bits #if QK_K == 64 uint8_t scales[2]; #else uint8_t scales[K_SCALE_SIZE]; // scales, quantized with 6 bits #endif half d; // super-block scale } block_q3_K; #if QK_K == 64 typedef struct { half d[2]; // super-block scales/mins uint8_t scales[2]; uint8_t qs[QK_K/2]; // 4-bit quants } block_q4_K; #else typedef struct { half d; // super-block scale for quantized scales half dmin; // super-block scale for quantized mins uint8_t scales[K_SCALE_SIZE]; // scales and mins, quantized with 6 bits uint8_t qs[QK_K/2]; // 4--bit quants } block_q4_K; #endif #if QK_K == 64 typedef struct { half d; // super-block scales/mins int8_t scales[QK_K/16]; // 8-bit block scales uint8_t qh[QK_K/8]; // quants, high bit uint8_t qs[QK_K/2]; // quants, low 4 bits } block_q5_K; #else typedef struct { half d; // super-block scale for quantized scales half dmin; // super-block scale for quantized mins uint8_t scales[3*QK_K/64]; // scales and mins, quantized with 6 bits uint8_t qh[QK_K/8]; // quants, high bit uint8_t 
qs[QK_K/2]; // quants, low 4 bits } block_q5_K; // 176 bytes / block #endif typedef struct { uint8_t ql[QK_K/2]; // quants, lower 4 bits uint8_t qh[QK_K/4]; // quants, upper 2 bits int8_t scales[QK_K/16]; // scales, quantized with 8 bits half d; // super-block scale } block_q6_K; // 210 bytes / block //====================================== dot products ========================= void kernel_mul_mv_q2_K_f32_impl( device const void * src0, device const float * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant int64_t & ne10, constant int64_t & ne12, constant int64_t & ne0, constant int64_t & ne1, constant uint & r2, constant uint & r3, uint3 tgpig[[threadgroup_position_in_grid]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { const int nb = ne00/QK_K; const int r0 = tgpig.x; const int r1 = tgpig.y; const int im = tgpig.z; const int first_row = (r0 * N_SIMDGROUP + sgitg) * N_DST; const int ib_row = first_row * nb; const uint i12 = im%ne12; const uint i13 = im/ne12; const uint offset0 = (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02); device const block_q2_K * x = (device const block_q2_K *) src0 + ib_row + offset0; device const float * y = (device const float *) src1 + r1*ne10 + im*ne00*ne1; float yl[32]; float sumf[N_DST]={0.f}, all_sum; const int step = sizeof(block_q2_K) * nb; #if QK_K == 256 const int ix = tiisg/8; // 0...3 const int it = tiisg%8; // 0...7 const int iq = it/4; // 0 or 1 const int ir = it%4; // 0...3 const int is = (8*ir)/16;// 0 or 1 device const float * y4 = y + ix * QK_K + 128 * iq + 8 * ir; for (int ib = ix; ib < nb; ib += 4) { float4 sumy = {0.f, 0.f, 0.f, 0.f}; for (int i = 0; i < 8; ++i) { yl[i+ 0] = y4[i+ 0]; sumy[0] += yl[i+ 0]; yl[i+ 8] = y4[i+32]; sumy[1] += yl[i+ 8]; yl[i+16] = y4[i+64]; sumy[2] += yl[i+16]; yl[i+24] = y4[i+96]; sumy[3] += yl[i+24]; } device const uint8_t * sc = (device const uint8_t *)x[ib].scales + 8*iq + is; device const uint16_t * qs = (device const uint16_t *)x[ib].qs + 16 * iq + 4 * ir; device const half * dh = &x[ib].d; for (int row = 0; row < N_DST; row++) { float4 acc1 = {0.f, 0.f, 0.f, 0.f}; float4 acc2 = {0.f, 0.f, 0.f, 0.f}; for (int i = 0; i < 8; i += 2) { acc1[0] += yl[i+ 0] * (qs[i/2] & 0x0003); acc2[0] += yl[i+ 1] * (qs[i/2] & 0x0300); acc1[1] += yl[i+ 8] * (qs[i/2] & 0x000c); acc2[1] += yl[i+ 9] * (qs[i/2] & 0x0c00); acc1[2] += yl[i+16] * (qs[i/2] & 0x0030); acc2[2] += yl[i+17] * (qs[i/2] & 0x3000); acc1[3] += yl[i+24] * (qs[i/2] & 0x00c0); acc2[3] += yl[i+25] * (qs[i/2] & 0xc000); } float dall = dh[0]; float dmin = dh[1] * 1.f/16.f; sumf[row] += dall * ((acc1[0] + 1.f/256.f * acc2[0]) * (sc[0] & 0xF) * 1.f/ 1.f + (acc1[1] + 1.f/256.f * acc2[1]) * (sc[2] & 0xF) * 1.f/ 4.f + (acc1[2] + 1.f/256.f * acc2[2]) * (sc[4] & 0xF) * 1.f/16.f + (acc1[3] + 1.f/256.f * acc2[3]) * (sc[6] & 0xF) * 1.f/64.f) - dmin * (sumy[0] * (sc[0] & 0xF0) + sumy[1] * (sc[2] & 0xF0) + sumy[2] * (sc[4] & 0xF0) + sumy[3] * (sc[6] & 0xF0)); qs += step/2; sc += step; dh += step/2; } y4 += 4 * QK_K; } #else const int ix = tiisg/2; // 0...15 const int it = tiisg%2; // 0...1 device const float * y4 = y + ix * QK_K + 8 * it; for (int ib = ix; ib < nb; ib += 16) { float4 sumy = {0.f, 0.f, 0.f, 0.f}; for (int i = 0; i < 8; ++i) { yl[i+ 0] = y4[i+ 0]; sumy[0] += yl[i+ 0]; yl[i+ 8] = y4[i+16]; sumy[1] += yl[i+ 8]; yl[i+16] = y4[i+32]; sumy[2] += yl[i+16]; yl[i+24] = y4[i+48]; sumy[3] += yl[i+24]; } device const uint8_t * sc = (device const uint8_t *)x[ib].scales; device 
const uint16_t * qs = (device const uint16_t *)x[ib].qs + 4 * it; device const half * dh = &x[ib].d; for (int row = 0; row < N_DST; row++) { float4 acc1 = {0.f, 0.f, 0.f, 0.f}; float4 acc2 = {0.f, 0.f, 0.f, 0.f}; for (int i = 0; i < 8; i += 2) { acc1[0] += yl[i+ 0] * (qs[i/2] & 0x0003); acc2[0] += yl[i+ 1] * (qs[i/2] & 0x0300); acc1[1] += yl[i+ 8] * (qs[i/2] & 0x000c); acc2[1] += yl[i+ 9] * (qs[i/2] & 0x0c00); acc1[2] += yl[i+16] * (qs[i/2] & 0x0030); acc2[2] += yl[i+17] * (qs[i/2] & 0x3000); acc1[3] += yl[i+24] * (qs[i/2] & 0x00c0); acc2[3] += yl[i+25] * (qs[i/2] & 0xc000); } float dall = dh[0]; float dmin = dh[1]; sumf[row] += dall * ((acc1[0] + 1.f/256.f * acc2[0]) * (sc[0] & 0xF) * 1.f/ 1.f + (acc1[1] + 1.f/256.f * acc2[1]) * (sc[1] & 0xF) * 1.f/ 4.f + (acc1[2] + 1.f/256.f * acc2[2]) * (sc[2] & 0xF) * 1.f/16.f + (acc1[3] + 1.f/256.f * acc2[3]) * (sc[3] & 0xF) * 1.f/64.f) - dmin * (sumy[0] * (sc[0] >> 4) + sumy[1] * (sc[1] >> 4) + sumy[2] * (sc[2] >> 4) + sumy[3] * (sc[3] >> 4)); qs += step/2; sc += step; dh += step/2; } y4 += 16 * QK_K; } #endif for (int row = 0; row < N_DST; ++row) { all_sum = simd_sum(sumf[row]); if (tiisg == 0) { dst[r1*ne0 + im*ne0*ne1 + first_row + row] = all_sum; } } } [[host_name("kernel_mul_mv_q2_K_f32")]] kernel void kernel_mul_mv_q2_K_f32( device const void * src0, device const float * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint & r2, constant uint & r3, uint3 tgpig[[threadgroup_position_in_grid]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { kernel_mul_mv_q2_K_f32_impl(src0, src1, dst, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3, tgpig, tiisg, sgitg); } #if QK_K == 256 void kernel_mul_mv_q3_K_f32_impl( device const void * src0, device const float * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant int64_t & ne10, constant int64_t & ne12, constant int64_t & ne0, constant int64_t & ne1, constant uint & r2, constant uint & r3, uint3 tgpig[[threadgroup_position_in_grid]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { const int nb = ne00/QK_K; const int64_t r0 = tgpig.x; const int64_t r1 = tgpig.y; const int64_t im = tgpig.z; const int first_row = (r0 * N_SIMDGROUP + sgitg) * 2; const uint i12 = im%ne12; const uint i13 = im/ne12; const uint offset0 = (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02); device const block_q3_K * x = (device const block_q3_K *) src0 + first_row*nb + offset0; device const float * yy = (device const float *) src1 + r1*ne10 + im*ne00*ne1; float yl[32]; //const uint16_t kmask1 = 0x3030; //const uint16_t kmask2 = 0x0f0f; const int tid = tiisg/4; const int ix = tiisg%4; const int ip = tid/4; // 0 or 1 const int il = 2*((tid%4)/2); // 0 or 2 const int ir = tid%2; const int n = 8; const int l0 = n*ir; // One would think that the Metal compiler would figure out that ip and il can only have // 4 possible states, and optimize accordingly. Well, no. It needs help, and we do it // with these two tales. 
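//
// The two lookup tables below: mm[] enumerates the hmask bit patterns for the four possible
// (ip, il) combinations, and qm[] holds the 2-bit masks that pull the low quants out of the
// qs bytes read as uint16 pairs; hm picks the mm[] row for this thread, so the inner loops
// work with fixed per-thread masks instead of recomputed shifts.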
// // Possible masks for the high bit const ushort4 mm[4] = {{0x0001, 0x0100, 0x0002, 0x0200}, // ip = 0, il = 0 {0x0004, 0x0400, 0x0008, 0x0800}, // ip = 0, il = 2 {0x0010, 0x1000, 0x0020, 0x2000}, // ip = 1, il = 0 {0x0040, 0x4000, 0x0080, 0x8000}}; // ip = 1, il = 2 // Possible masks for the low 2 bits const int4 qm[2] = {{0x0003, 0x0300, 0x000c, 0x0c00}, {0x0030, 0x3000, 0x00c0, 0xc000}}; const ushort4 hm = mm[2*ip + il/2]; const int shift = 2*il; const float v1 = il == 0 ? 4.f : 64.f; const float v2 = 4.f * v1; const uint16_t s_shift1 = 4*ip; const uint16_t s_shift2 = s_shift1 + il; const int q_offset = 32*ip + l0; const int y_offset = 128*ip + 32*il + l0; const int step = sizeof(block_q3_K) * nb / 2; device const float * y1 = yy + ix*QK_K + y_offset; uint32_t scales32, aux32; thread uint16_t * scales16 = (thread uint16_t *)&scales32; thread const int8_t * scales = (thread const int8_t *)&scales32; float sumf1[2] = {0.f}; float sumf2[2] = {0.f}; for (int i = ix; i < nb; i += 4) { for (int l = 0; l < 8; ++l) { yl[l+ 0] = y1[l+ 0]; yl[l+ 8] = y1[l+16]; yl[l+16] = y1[l+32]; yl[l+24] = y1[l+48]; } device const uint16_t * q = (device const uint16_t *)(x[i].qs + q_offset); device const uint16_t * h = (device const uint16_t *)(x[i].hmask + l0); device const uint16_t * a = (device const uint16_t *)(x[i].scales); device const half * dh = &x[i].d; for (int row = 0; row < 2; ++row) { const float d_all = (float)dh[0]; scales16[0] = a[4]; scales16[1] = a[5]; aux32 = ((scales32 >> s_shift2) << 4) & 0x30303030; scales16[0] = a[il+0]; scales16[1] = a[il+1]; scales32 = ((scales32 >> s_shift1) & 0x0f0f0f0f) | aux32; float s1 = 0, s2 = 0, s3 = 0, s4 = 0, s5 = 0, s6 = 0; for (int l = 0; l < n; l += 2) { const int32_t qs = q[l/2]; s1 += yl[l+0] * (qs & qm[il/2][0]); s2 += yl[l+1] * (qs & qm[il/2][1]); s3 += ((h[l/2] & hm[0]) ? 0.f : yl[l+0]) + ((h[l/2] & hm[1]) ? 0.f : yl[l+1]); s4 += yl[l+16] * (qs & qm[il/2][2]); s5 += yl[l+17] * (qs & qm[il/2][3]); s6 += ((h[l/2] & hm[2]) ? 0.f : yl[l+16]) + ((h[l/2] & hm[3]) ? 0.f : yl[l+17]); } float d1 = d_all * (s1 + 1.f/256.f * s2 - s3*v1); float d2 = d_all * (s4 + 1.f/256.f * s5 - s6*v2); sumf1[row] += d1 * (scales[0] - 32); sumf2[row] += d2 * (scales[2] - 32); s1 = s2 = s3 = s4 = s5 = s6 = 0; for (int l = 0; l < n; l += 2) { const int32_t qs = q[l/2+8]; s1 += yl[l+8] * (qs & qm[il/2][0]); s2 += yl[l+9] * (qs & qm[il/2][1]); s3 += ((h[l/2+8] & hm[0]) ? 0.f : yl[l+8]) + ((h[l/2+8] & hm[1]) ? 0.f : yl[l+9]); s4 += yl[l+24] * (qs & qm[il/2][2]); s5 += yl[l+25] * (qs & qm[il/2][3]); s6 += ((h[l/2+8] & hm[2]) ? 0.f : yl[l+24]) + ((h[l/2+8] & hm[3]) ? 
0.f : yl[l+25]); } d1 = d_all * (s1 + 1.f/256.f * s2 - s3*v1); d2 = d_all * (s4 + 1.f/256.f * s5 - s6*v2); sumf1[row] += d1 * (scales[1] - 32); sumf2[row] += d2 * (scales[3] - 32); q += step; h += step; a += step; dh += step; } y1 += 4 * QK_K; } for (int row = 0; row < 2; ++row) { const float sumf = (sumf1[row] + 0.25f * sumf2[row]) / (1 << shift); sumf1[row] = simd_sum(sumf); } if (tiisg == 0) { for (int row = 0; row < 2; ++row) { dst[r1*ne0 + im*ne0*ne1 + first_row + row] = sumf1[row]; } } } #else void kernel_mul_mv_q3_K_f32_impl( device const void * src0, device const float * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant int64_t & ne10, constant int64_t & ne12, constant int64_t & ne0, constant int64_t & ne1, constant uint & r2, constant uint & r3, uint3 tgpig[[threadgroup_position_in_grid]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { const int nb = ne00/QK_K; const int64_t r0 = tgpig.x; const int64_t r1 = tgpig.y; const int64_t im = tgpig.z; const int row = 2 * r0 + sgitg; const uint i12 = im%ne12; const uint i13 = im/ne12; const uint offset0 = (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02); device const block_q3_K * x = (device const block_q3_K *) src0 + row*nb + offset0; device const float * yy = (device const float *) src1 + r1*ne10 + im*ne00*ne1; const int ix = tiisg/4; const int il = 4 * (tiisg%4);// 0, 4, 8, 12 const int iq = il/8; // 0, 0, 1, 1 const int in = il%8; // 0, 4, 0, 4 float2 sum = {0.f, 0.f}; for (int i = ix; i < nb; i += 8) { const float d_all = (float)(x[i].d); device const uint16_t * q = (device const uint16_t *)(x[i].qs + il); device const uint16_t * h = (device const uint16_t *)(x[i].hmask + in); device const uint16_t * s = (device const uint16_t *)(x[i].scales); device const float * y = yy + i * QK_K + il; const float d1 = d_all * ((int32_t)(s[0] & 0x000F) - 8); const float d2 = d_all * ((int32_t)(s[0] & 0x00F0) - 128) * 1.f/64.f; const float d3 = d_all * ((int32_t)(s[0] & 0x0F00) - 2048) * 1.f/4096.f; const float d4 = d_all * ((int32_t)(s[0] & 0xF000) - 32768) * 1.f/262144.f; for (int l = 0; l < 4; l += 2) { const uint16_t hm = h[l/2] >> iq; sum[0] += y[l+ 0] * d1 * ((int32_t)(q[l/2] & 0x0003) - ((hm & 0x0001) ? 0 : 4)) + y[l+16] * d2 * ((int32_t)(q[l/2] & 0x000c) - ((hm & 0x0004) ? 0 : 16)) + y[l+32] * d3 * ((int32_t)(q[l/2] & 0x0030) - ((hm & 0x0010) ? 0 : 64)) + y[l+48] * d4 * ((int32_t)(q[l/2] & 0x00c0) - ((hm & 0x0040) ? 0 : 256)); sum[1] += y[l+ 1] * d1 * ((int32_t)(q[l/2] & 0x0300) - ((hm & 0x0100) ? 0 : 1024)) + y[l+17] * d2 * ((int32_t)(q[l/2] & 0x0c00) - ((hm & 0x0400) ? 0 : 4096)) + y[l+33] * d3 * ((int32_t)(q[l/2] & 0x3000) - ((hm & 0x1000) ? 0 : 16384)) + y[l+49] * d4 * ((int32_t)(q[l/2] & 0xc000) - ((hm & 0x4000) ? 
0 : 65536)); } } const float sumf = sum[0] + sum[1] * 1.f/256.f; const float tot = simd_sum(sumf); if (tiisg == 0) { dst[r1*ne0 + im*ne0*ne1 + row] = tot; } } #endif [[host_name("kernel_mul_mv_q3_K_f32")]] kernel void kernel_mul_mv_q3_K_f32( device const void * src0, device const float * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint & r2, constant uint & r3, uint3 tgpig[[threadgroup_position_in_grid]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { kernel_mul_mv_q3_K_f32_impl(src0, src1, dst, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3, tgpig, tiisg, sgitg); } #if QK_K == 256 void kernel_mul_mv_q4_K_f32_impl( device const void * src0, device const float * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant int64_t & ne10, constant int64_t & ne12, constant int64_t & ne0, constant int64_t & ne1, constant uint & r2, constant uint & r3, uint3 tgpig[[threadgroup_position_in_grid]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { const uint16_t kmask1 = 0x3f3f; const uint16_t kmask2 = 0x0f0f; const uint16_t kmask3 = 0xc0c0; const int ix = tiisg/8; // 0...3 const int it = tiisg%8; // 0...7 const int iq = it/4; // 0 or 1 const int ir = it%4; // 0...3 const int nb = ne00/QK_K; const int r0 = tgpig.x; const int r1 = tgpig.y; const int im = tgpig.z; //const int first_row = (r0 * N_SIMDGROUP + sgitg) * N_DST; const int first_row = r0 * N_DST; const int ib_row = first_row * nb; const uint i12 = im%ne12; const uint i13 = im/ne12; const uint offset0 = (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02); device const block_q4_K * x = (device const block_q4_K *) src0 + ib_row + offset0; device const float * y = (device const float *) src1 + r1*ne10 + im*ne00*ne1; float yl[16]; float yh[16]; float sumf[N_DST]={0.f}, all_sum; const int step = sizeof(block_q4_K) * nb / 2; device const float * y4 = y + ix * QK_K + 64 * iq + 8 * ir; uint16_t sc16[4]; thread const uint8_t * sc8 = (thread const uint8_t *)sc16; for (int ib = ix; ib < nb; ib += 4) { float4 sumy = {0.f, 0.f, 0.f, 0.f}; for (int i = 0; i < 8; ++i) { yl[i+0] = y4[i+ 0]; sumy[0] += yl[i+0]; yl[i+8] = y4[i+ 32]; sumy[1] += yl[i+8]; yh[i+0] = y4[i+128]; sumy[2] += yh[i+0]; yh[i+8] = y4[i+160]; sumy[3] += yh[i+8]; } device const uint16_t * sc = (device const uint16_t *)x[ib].scales + iq; device const uint16_t * q1 = (device const uint16_t *)x[ib].qs + 16 * iq + 4 * ir; device const half * dh = &x[ib].d; for (int row = 0; row < N_DST; row++) { sc16[0] = sc[0] & kmask1; sc16[1] = sc[2] & kmask1; sc16[2] = ((sc[4] >> 0) & kmask2) | ((sc[0] & kmask3) >> 2); sc16[3] = ((sc[4] >> 4) & kmask2) | ((sc[2] & kmask3) >> 2); device const uint16_t * q2 = q1 + 32; float4 acc1 = {0.f, 0.f, 0.f, 0.f}; float4 acc2 = {0.f, 0.f, 0.f, 0.f}; for (int i = 0; i < 8; i += 2) { acc1[0] += yl[i+0] * (q1[i/2] & 0x000F); acc1[1] += yl[i+1] * (q1[i/2] & 0x0F00); acc1[2] += yl[i+8] * (q1[i/2] & 0x00F0); acc1[3] += yl[i+9] * (q1[i/2] & 0xF000); acc2[0] += yh[i+0] * (q2[i/2] & 0x000F); acc2[1] += yh[i+1] * (q2[i/2] & 0x0F00); acc2[2] += yh[i+8] * (q2[i/2] & 0x00F0); acc2[3] += yh[i+9] * (q2[i/2] & 0xF000); } 
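            // acc1/acc2 hold dot products against nibbles still sitting in the low and high bytes of
            // each uint16: the 1.f/256.f and 1.f/16.f factors below fold the missing bit-shifts into
            // the scale instead of shifting every product. sc8[0..1] and sc8[4..5] are the unpacked
            // 6-bit sub-block scales, sc8[2..3] and sc8[6..7] the corresponding mins, so the dmin
            // term subtracts min * (partial sum of y) for each sub-block.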
float dall = dh[0]; float dmin = dh[1]; sumf[row] += dall * ((acc1[0] + 1.f/256.f * acc1[1]) * sc8[0] + (acc1[2] + 1.f/256.f * acc1[3]) * sc8[1] * 1.f/16.f + (acc2[0] + 1.f/256.f * acc2[1]) * sc8[4] + (acc2[2] + 1.f/256.f * acc2[3]) * sc8[5] * 1.f/16.f) - dmin * (sumy[0] * sc8[2] + sumy[1] * sc8[3] + sumy[2] * sc8[6] + sumy[3] * sc8[7]); q1 += step; sc += step; dh += step; } y4 += 4 * QK_K; } for (int row = 0; row < N_DST; ++row) { all_sum = simd_sum(sumf[row]); if (tiisg == 0) { dst[r1*ne0 + im*ne0*ne1 + first_row + row] = all_sum; } } } #else void kernel_mul_mv_q4_K_f32_impl( device const void * src0, device const float * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant int64_t & ne10, constant int64_t & ne12, constant int64_t & ne0, constant int64_t & ne1, constant uint & r2, constant uint & r3, uint3 tgpig[[threadgroup_position_in_grid]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { const int ix = tiisg/4; // 0...7 const int it = tiisg%4; // 0...3 const int nb = ne00/QK_K; const int r0 = tgpig.x; const int r1 = tgpig.y; const int im = tgpig.z; const int first_row = r0 * N_DST; const int ib_row = first_row * nb; const uint i12 = im%ne12; const uint i13 = im/ne12; const uint offset0 = (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02); device const block_q4_K * x = (device const block_q4_K *) src0 + ib_row + offset0; device const float * y = (device const float *) src1 + r1*ne10 + im*ne00*ne1; float yl[8]; float yh[8]; float sumf[N_DST]={0.f}, all_sum; const int step = sizeof(block_q4_K) * nb / 2; device const float * y4 = y + ix * QK_K + 8 * it; uint16_t sc16[4]; for (int ib = ix; ib < nb; ib += 8) { float2 sumy = {0.f, 0.f}; for (int i = 0; i < 8; ++i) { yl[i] = y4[i+ 0]; sumy[0] += yl[i]; yh[i] = y4[i+32]; sumy[1] += yh[i]; } device const uint16_t * sc = (device const uint16_t *)x[ib].scales; device const uint16_t * qs = (device const uint16_t *)x[ib].qs + 4 * it; device const half * dh = x[ib].d; for (int row = 0; row < N_DST; row++) { sc16[0] = sc[0] & 0x000f; sc16[1] = sc[0] & 0x0f00; sc16[2] = sc[0] & 0x00f0; sc16[3] = sc[0] & 0xf000; float2 acc1 = {0.f, 0.f}; float2 acc2 = {0.f, 0.f}; for (int i = 0; i < 8; i += 2) { acc1[0] += yl[i+0] * (qs[i/2] & 0x000F); acc1[1] += yl[i+1] * (qs[i/2] & 0x0F00); acc2[0] += yh[i+0] * (qs[i/2] & 0x00F0); acc2[1] += yh[i+1] * (qs[i/2] & 0xF000); } float dall = dh[0]; float dmin = dh[1]; sumf[row] += dall * ((acc1[0] + 1.f/256.f * acc1[1]) * sc16[0] + (acc2[0] + 1.f/256.f * acc2[1]) * sc16[1] * 1.f/4096.f) - dmin * 1.f/16.f * (sumy[0] * sc16[2] + sumy[1] * sc16[3] * 1.f/256.f); qs += step; sc += step; dh += step; } y4 += 8 * QK_K; } for (int row = 0; row < N_DST; ++row) { all_sum = simd_sum(sumf[row]); if (tiisg == 0) { dst[r1*ne0 + im*ne0*ne1 + first_row + row] = all_sum; } } } #endif [[host_name("kernel_mul_mv_q4_K_f32")]] kernel void kernel_mul_mv_q4_K_f32( device const void * src0, device const float * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint & r2, constant uint & r3, uint3 tgpig[[threadgroup_position_in_grid]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { 
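    // thin [[host_name]] entry point: the actual work is done by kernel_mul_mv_q4_K_f32_impl above;
    // the stride/size arguments that are not forwarded are unused here, presumably kept so the host
    // side can bind the same argument layout to every mul_mv kernel.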
kernel_mul_mv_q4_K_f32_impl(src0, src1, dst, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3, tgpig, tiisg, sgitg); } void kernel_mul_mv_q5_K_f32_impl( device const void * src0, device const float * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant int64_t & ne10, constant int64_t & ne12, constant int64_t & ne0, constant int64_t & ne1, constant uint & r2, constant uint & r3, uint3 tgpig[[threadgroup_position_in_grid]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { const int nb = ne00/QK_K; const int64_t r0 = tgpig.x; const int64_t r1 = tgpig.y; const int im = tgpig.z; const int first_row = (r0 * N_SIMDGROUP + sgitg) * 2; const uint i12 = im%ne12; const uint i13 = im/ne12; const uint offset0 = (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02); device const block_q5_K * x = (device const block_q5_K *) src0 + first_row*nb + offset0; device const float * yy = (device const float *) src1 + r1*ne10 + im*ne00*ne1; float sumf[2]={0.f}; const int step = sizeof(block_q5_K) * nb; #if QK_K == 256 # float yl[16], yh[16]; const uint16_t kmask1 = 0x3f3f; const uint16_t kmask2 = 0x0f0f; const uint16_t kmask3 = 0xc0c0; const int tid = tiisg/4; const int ix = tiisg%4; const int iq = tid/4; const int ir = tid%4; const int n = 8; const int l0 = n*ir; const int q_offset = 32*iq + l0; const int y_offset = 64*iq + l0; const uint8_t hm1 = 1u << (2*iq); const uint8_t hm2 = hm1 << 1; const uint8_t hm3 = hm1 << 4; const uint8_t hm4 = hm2 << 4; uint16_t sc16[4]; thread const uint8_t * sc8 = (thread const uint8_t *)sc16; device const float * y1 = yy + ix*QK_K + y_offset; for (int i = ix; i < nb; i += 4) { device const uint8_t * q1 = x[i].qs + q_offset; device const uint8_t * qh = x[i].qh + l0; device const half * dh = &x[i].d; device const uint16_t * a = (device const uint16_t *)x[i].scales + iq; device const float * y2 = y1 + 128; float4 sumy = {0.f, 0.f, 0.f, 0.f}; for (int l = 0; l < 8; ++l) { yl[l+0] = y1[l+ 0]; sumy[0] += yl[l+0]; yl[l+8] = y1[l+32]; sumy[1] += yl[l+8]; yh[l+0] = y2[l+ 0]; sumy[2] += yh[l+0]; yh[l+8] = y2[l+32]; sumy[3] += yh[l+8]; } for (int row = 0; row < 2; ++row) { device const uint8_t * q2 = q1 + 64; sc16[0] = a[0] & kmask1; sc16[1] = a[2] & kmask1; sc16[2] = ((a[4] >> 0) & kmask2) | ((a[0] & kmask3) >> 2); sc16[3] = ((a[4] >> 4) & kmask2) | ((a[2] & kmask3) >> 2); float4 acc1 = {0.f}; float4 acc2 = {0.f}; for (int l = 0; l < n; ++l) { uint8_t h = qh[l]; acc1[0] += yl[l+0] * (q1[l] & 0x0F); acc1[1] += yl[l+8] * (q1[l] & 0xF0); acc1[2] += yh[l+0] * (q2[l] & 0x0F); acc1[3] += yh[l+8] * (q2[l] & 0xF0); acc2[0] += h & hm1 ? yl[l+0] : 0.f; acc2[1] += h & hm2 ? yl[l+8] : 0.f; acc2[2] += h & hm3 ? yh[l+0] : 0.f; acc2[3] += h & hm4 ? 
yh[l+8] : 0.f; } const float dall = dh[0]; const float dmin = dh[1]; sumf[row] += dall * (sc8[0] * (acc1[0] + 16.f*acc2[0]) + sc8[1] * (acc1[1]/16.f + 16.f*acc2[1]) + sc8[4] * (acc1[2] + 16.f*acc2[2]) + sc8[5] * (acc1[3]/16.f + 16.f*acc2[3])) - dmin * (sumy[0] * sc8[2] + sumy[1] * sc8[3] + sumy[2] * sc8[6] + sumy[3] * sc8[7]); q1 += step; qh += step; dh += step/2; a += step/2; } y1 += 4 * QK_K; } #else float yl[8], yh[8]; const int il = 4 * (tiisg/8); // 0, 4, 8, 12 const int ix = tiisg%8; const int iq = il/8; // 0, 0, 1, 1 const int in = il%8; // 0, 4, 0, 4 device const float * y = yy + ix*QK_K + il; for (int i = ix; i < nb; i += 8) { for (int l = 0; l < 4; ++l) { yl[l+0] = y[l+ 0]; yl[l+4] = y[l+16]; yh[l+0] = y[l+32]; yh[l+4] = y[l+48]; } device const half * dh = &x[i].d; device const uint8_t * q = x[i].qs + il; device const uint8_t * h = x[i].qh + in; device const int8_t * s = x[i].scales; for (int row = 0; row < 2; ++row) { const float d = dh[0]; float2 acc = {0.f, 0.f}; for (int l = 0; l < 4; ++l) { const uint8_t hl = h[l] >> iq; acc[0] += yl[l+0] * s[0] * ((int16_t)(q[l+ 0] & 0x0F) - (hl & 0x01 ? 0 : 16)) + yl[l+4] * s[1] * ((int16_t)(q[l+16] & 0x0F) - (hl & 0x04 ? 0 : 16)); acc[1] += yh[l+0] * s[2] * ((int16_t)(q[l+ 0] & 0xF0) - (hl & 0x10 ? 0 : 256)) + yh[l+4] * s[3] * ((int16_t)(q[l+16] & 0xF0) - (hl & 0x40 ? 0 : 256)); } sumf[row] += d * (acc[0] + 1.f/16.f * acc[1]); q += step; h += step; s += step; dh += step/2; } y += 8 * QK_K; } #endif for (int row = 0; row < 2; ++row) { const float tot = simd_sum(sumf[row]); if (tiisg == 0) { dst[r1*ne0 + im*ne0*ne1 + first_row + row] = tot; } } } [[host_name("kernel_mul_mv_q5_K_f32")]] kernel void kernel_mul_mv_q5_K_f32( device const void * src0, device const float * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint & r2, constant uint & r3, uint3 tgpig[[threadgroup_position_in_grid]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { kernel_mul_mv_q5_K_f32_impl(src0, src1, dst, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3, tgpig, tiisg, sgitg); } void kernel_mul_mv_q6_K_f32_impl( device const void * src0, device const float * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant int64_t & ne10, constant int64_t & ne12, constant int64_t & ne0, constant int64_t & ne1, constant uint & r2, constant uint & r3, uint3 tgpig[[threadgroup_position_in_grid]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { const uint8_t kmask1 = 0x03; const uint8_t kmask2 = 0x0C; const uint8_t kmask3 = 0x30; const uint8_t kmask4 = 0xC0; const int nb = ne00/QK_K; const int64_t r0 = tgpig.x; const int64_t r1 = tgpig.y; const int im = tgpig.z; const int row = 2 * r0 + sgitg; const uint i12 = im%ne12; const uint i13 = im/ne12; const uint offset0 = (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02); device const block_q6_K * x = (device const block_q6_K *) src0 + row * nb + offset0; device const float * yy = (device const float *) src1 + r1*ne10 + im*ne00*ne1; float sumf = 0; #if QK_K == 256 const int tid = tiisg/2; const int ix = tiisg%2; const int ip = tid/8; // 0 or 1 const int il = tid%8; const int n 
= 4; const int l0 = n*il; const int is = 8*ip + l0/16; const int y_offset = 128*ip + l0; const int q_offset_l = 64*ip + l0; const int q_offset_h = 32*ip + l0; for (int i = ix; i < nb; i += 2) { device const uint8_t * q1 = x[i].ql + q_offset_l; device const uint8_t * q2 = q1 + 32; device const uint8_t * qh = x[i].qh + q_offset_h; device const int8_t * sc = x[i].scales + is; device const float * y = yy + i * QK_K + y_offset; const float dall = x[i].d; float4 sums = {0.f, 0.f, 0.f, 0.f}; for (int l = 0; l < n; ++l) { sums[0] += y[l+ 0] * ((int8_t)((q1[l] & 0xF) | ((qh[l] & kmask1) << 4)) - 32); sums[1] += y[l+32] * ((int8_t)((q2[l] & 0xF) | ((qh[l] & kmask2) << 2)) - 32); sums[2] += y[l+64] * ((int8_t)((q1[l] >> 4) | ((qh[l] & kmask3) << 0)) - 32); sums[3] += y[l+96] * ((int8_t)((q2[l] >> 4) | ((qh[l] & kmask4) >> 2)) - 32); } sumf += dall * (sums[0] * sc[0] + sums[1] * sc[2] + sums[2] * sc[4] + sums[3] * sc[6]); } #else const int ix = tiisg/4; const int il = 4*(tiisg%4); for (int i = ix; i < nb; i += 8) { device const float * y = yy + i * QK_K + il; device const uint8_t * ql = x[i].ql + il; device const uint8_t * qh = x[i].qh + il; device const int8_t * s = x[i].scales; const float d = x[i].d; float4 sums = {0.f, 0.f, 0.f, 0.f}; for (int l = 0; l < 4; ++l) { sums[0] += y[l+ 0] * ((int8_t)((ql[l+ 0] & 0xF) | ((qh[l] & kmask1) << 4)) - 32); sums[1] += y[l+16] * ((int8_t)((ql[l+16] & 0xF) | ((qh[l] & kmask2) << 2)) - 32); sums[2] += y[l+32] * ((int8_t)((ql[l+ 0] >> 4) | ((qh[l] & kmask3) >> 0)) - 32); sums[3] += y[l+48] * ((int8_t)((ql[l+16] >> 4) | ((qh[l] & kmask4) >> 2)) - 32); } sumf += d * (sums[0] * s[0] + sums[1] * s[1] + sums[2] * s[2] + sums[3] * s[3]); } #endif const float tot = simd_sum(sumf); if (tiisg == 0) { dst[r1*ne0 + im*ne0*ne1 + row] = tot; } } [[host_name("kernel_mul_mv_q6_K_f32")]] kernel void kernel_mul_mv_q6_K_f32( device const void * src0, device const float * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint & r2, constant uint & r3, uint3 tgpig[[threadgroup_position_in_grid]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { kernel_mul_mv_q6_K_f32_impl(src0, src1, dst, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3, tgpig, tiisg, sgitg); } //============================= templates and their specializations ============================= // NOTE: this is not dequantizing - we are simply fitting the template template <typename type4x4> void dequantize_f32(device const float4x4 * src, short il, thread type4x4 & reg) { float4x4 temp = *(((device float4x4 *)src)); for (int i = 0; i < 16; i++){ reg[i/4][i%4] = temp[i/4][i%4]; } } template <typename type4x4> void dequantize_f16(device const half4x4 * src, short il, thread type4x4 & reg) { half4x4 temp = *(((device half4x4 *)src)); for (int i = 0; i < 16; i++){ reg[i/4][i%4] = temp[i/4][i%4]; } } template <typename type4x4> void dequantize_q4_0(device const block_q4_0 *xb, short il, thread type4x4 & reg) { device const uint16_t * qs = ((device const uint16_t *)xb + 1); const float d1 = il ? (xb->d / 16.h) : xb->d; const float d2 = d1 / 256.f; const float md = -8.h * xb->d; const ushort mask0 = il ? 
0x00F0 : 0x000F; const ushort mask1 = mask0 << 8; for (int i=0;i<8;i++) { reg[i/2][2*(i%2)+0] = d1 * (qs[i] & mask0) + md; reg[i/2][2*(i%2)+1] = d2 * (qs[i] & mask1) + md; } } template <typename type4x4> void dequantize_q4_1(device const block_q4_1 *xb, short il, thread type4x4 & reg) { device const uint16_t * qs = ((device const uint16_t *)xb + 2); const float d1 = il ? (xb->d / 16.h) : xb->d; const float d2 = d1 / 256.f; const float m = xb->m; const ushort mask0 = il ? 0x00F0 : 0x000F; const ushort mask1 = mask0 << 8; for (int i=0;i<8;i++) { reg[i/2][2*(i%2)+0] = ((qs[i] & mask0) * d1) + m; reg[i/2][2*(i%2)+1] = ((qs[i] & mask1) * d2) + m; } } template <typename type4x4> void dequantize_q5_0(device const block_q5_0 *xb, short il, thread type4x4 & reg) { device const uint16_t * qs = ((device const uint16_t *)xb + 3); const float d = xb->d; const float md = -16.h * xb->d; const ushort mask = il ? 0x00F0 : 0x000F; const uint32_t qh = *((device const uint32_t *)xb->qh); const int x_mv = il ? 4 : 0; const int gh_mv = il ? 12 : 0; const int gh_bk = il ? 0 : 4; for (int i = 0; i < 8; i++) { // extract the 5-th bits for x0 and x1 const uint8_t xh_0 = ((qh >> (gh_mv + 2*i )) << gh_bk) & 0x10; const uint8_t xh_1 = ((qh >> (gh_mv + 2*i+1)) << gh_bk) & 0x10; // combine the 4-bits from qs with the 5th bit const int32_t x0 = ((((qs[i] ) & mask) >> x_mv) | xh_0); const int32_t x1 = ((((qs[i] >> 8) & mask) >> x_mv) | xh_1); reg[i/2][2*(i%2)+0] = d * x0 + md; reg[i/2][2*(i%2)+1] = d * x1 + md; } } template <typename type4x4> void dequantize_q5_1(device const block_q5_1 *xb, short il, thread type4x4 & reg) { device const uint16_t * qs = ((device const uint16_t *)xb + 4); const float d = xb->d; const float m = xb->m; const ushort mask = il ? 0x00F0 : 0x000F; const uint32_t qh = *((device const uint32_t *)xb->qh); const int x_mv = il ? 4 : 0; const int gh_mv = il ? 12 : 0; const int gh_bk = il ? 0 : 4; for (int i = 0; i < 8; i++) { // extract the 5-th bits for x0 and x1 const uint8_t xh_0 = ((qh >> (gh_mv + 2*i )) << gh_bk) & 0x10; const uint8_t xh_1 = ((qh >> (gh_mv + 2*i+1)) << gh_bk) & 0x10; // combine the 4-bits from qs with the 5th bit const int32_t x0 = ((((qs[i] ) & mask) >> x_mv) | xh_0); const int32_t x1 = ((((qs[i] >> 8) & mask) >> x_mv) | xh_1); reg[i/2][2*(i%2)+0] = d * x0 + m; reg[i/2][2*(i%2)+1] = d * x1 + m; } } template <typename type4x4> void dequantize_q8_0(device const block_q8_0 *xb, short il, thread type4x4 & reg) { device const int8_t * qs = ((device const int8_t *)xb->qs); const half d = xb->d; for (int i = 0; i < 16; i++) { reg[i/4][i%4] = (qs[i + 16*il] * d); } } template <typename type4x4> void dequantize_q2_K(device const block_q2_K *xb, short il, thread type4x4 & reg) { const float d = xb->d; const float min = xb->dmin; device const uint8_t * q = (device const uint8_t *)xb->qs; float dl, ml; uint8_t sc = xb->scales[il]; #if QK_K == 256 q = q + 32*(il/8) + 16*(il&1); il = (il/2)%4; #endif half coef = il>1 ? (il>2 ? 1/64.h : 1/16.h) : (il>0 ? 1/4.h : 1.h); uchar mask = il>1 ? (il>2 ? 192 : 48) : (il>0 ? 
12 : 3); dl = d * (sc & 0xF) * coef, ml = min * (sc >> 4); for (int i = 0; i < 16; ++i) { reg[i/4][i%4] = dl * (q[i] & mask) - ml; } } template <typename type4x4> void dequantize_q3_K(device const block_q3_K *xb, short il, thread type4x4 & reg) { const half d_all = xb->d; device const uint8_t * q = (device const uint8_t *)xb->qs; device const uint8_t * h = (device const uint8_t *)xb->hmask; device const int8_t * scales = (device const int8_t *)xb->scales; #if QK_K == 256 q = q + 32 * (il/8) + 16 * (il&1); h = h + 16 * (il&1); uint8_t m = 1 << (il/2); uint16_t kmask1 = (il/4)>1 ? ((il/4)>2 ? 192 : 48) : \ ((il/4)>0 ? 12 : 3); uint16_t kmask2 = il/8 ? 0xF0 : 0x0F; uint16_t scale_2 = scales[il%8], scale_1 = scales[8 + il%4]; int16_t dl_int = (il/4)&1 ? (scale_2&kmask2) | ((scale_1&kmask1) << 2) : (scale_2&kmask2) | ((scale_1&kmask1) << 4); half dl = il<8 ? d_all * (dl_int - 32.h) : d_all * (dl_int / 16.h - 32.h); const half ml = 4.h * dl; il = (il/2) & 3; const half coef = il>1 ? (il>2 ? 1/64.h : 1/16.h) : (il>0 ? 1/4.h : 1.h); const uint8_t mask = il>1 ? (il>2 ? 192 : 48) : (il>0 ? 12 : 3); dl *= coef; for (int i = 0; i < 16; ++i) { reg[i/4][i%4] = dl * (q[i] & mask) - (h[i] & m ? 0 : ml); } #else float kcoef = il&1 ? 1.f/16.f : 1.f; uint16_t kmask = il&1 ? 0xF0 : 0x0F; float dl = d_all * ((scales[il/2] & kmask) * kcoef - 8); float coef = il>1 ? (il>2 ? 1/64.h : 1/16.h) : (il>0 ? 1/4.h : 1.h); uint8_t mask = il>1 ? (il>2 ? 192 : 48) : (il>0 ? 12 : 3); uint8_t m = 1<<(il*2); for (int i = 0; i < 16; ++i) { reg[i/4][i%4] = coef * dl * ((q[i] & mask) - ((h[i%8] & (m * (1 + i/8))) ? 0 : 4.f/coef)); } #endif } static inline uchar2 get_scale_min_k4_just2(int j, int k, device const uchar * q) { return j < 4 ? uchar2{uchar(q[j+0+k] & 63), uchar(q[j+4+k] & 63)} : uchar2{uchar((q[j+4+k] & 0xF) | ((q[j-4+k] & 0xc0) >> 2)), uchar((q[j+4+k] >> 4) | ((q[j-0+k] & 0xc0) >> 2))}; } template <typename type4x4> void dequantize_q4_K(device const block_q4_K *xb, short il, thread type4x4 & reg) { device const uchar * q = xb->qs; #if QK_K == 256 short is = (il/4) * 2; q = q + (il/4) * 32 + 16 * (il&1); il = il & 3; const uchar2 sc = get_scale_min_k4_just2(is, il/2, xb->scales); const float d = il < 2 ? xb->d : xb->d / 16.h; const float min = xb->dmin; const float dl = d * sc[0]; const float ml = min * sc[1]; #else q = q + 16 * (il&1); device const uint8_t * s = xb->scales; device const half2 * dh = (device const half2 *)xb->d; const float2 d = (float2)dh[0]; const float dl = il<2 ? d[0] * (s[0]&0xF) : d[0] * (s[1]&0xF)/16.h; const float ml = il<2 ? d[1] * (s[0]>>4) : d[1] * (s[1]>>4); #endif const ushort mask = il<2 ? 0x0F : 0xF0; for (int i = 0; i < 16; ++i) { reg[i/4][i%4] = dl * (q[i] & mask) - ml; } } template <typename type4x4> void dequantize_q5_K(device const block_q5_K *xb, short il, thread type4x4 & reg) { device const uint8_t * q = xb->qs; device const uint8_t * qh = xb->qh; #if QK_K == 256 short is = (il/4) * 2; q = q + 32 * (il/4) + 16 * (il&1); qh = qh + 16 * (il&1); uint8_t ul = 1 << (il/2); il = il & 3; const uchar2 sc = get_scale_min_k4_just2(is, il/2, xb->scales); const float d = il < 2 ? xb->d : xb->d / 16.h; const float min = xb->dmin; const float dl = d * sc[0]; const float ml = min * sc[1]; const ushort mask = il<2 ? 0x0F : 0xF0; const float qh_val = il<2 ? 16.f : 256.f; for (int i = 0; i < 16; ++i) { reg[i/4][i%4] = dl * ((q[i] & mask) + (qh[i] & ul ? 
qh_val : 0)) - ml; } #else q = q + 16 * (il&1); device const int8_t * s = xb->scales; const float dl = xb->d * s[il]; uint8_t m = 1<<(il*2); const float coef = il<2 ? 1.f : 1.f/16.f; const ushort mask = il<2 ? 0x0F : 0xF0; for (int i = 0; i < 16; ++i) { reg[i/4][i%4] = coef * dl * ((q[i] & mask) - (qh[i%8] & (m*(1+i/8)) ? 0.f : 16.f/coef)); } #endif } template <typename type4x4> void dequantize_q6_K(device const block_q6_K *xb, short il, thread type4x4 & reg) { const half d_all = xb->d; device const uint8_t * ql = (device const uint8_t *)xb->ql; device const uint8_t * qh = (device const uint8_t *)xb->qh; device const int8_t * scales = (device const int8_t *)xb->scales; #if QK_K == 256 ql = ql + 64*(il/8) + 32*((il/2)&1) + 16*(il&1); qh = qh + 32*(il/8) + 16*(il&1); half sc = scales[(il%2) + 2 * ((il/2))]; il = (il/2) & 3; #else ql = ql + 16 * (il&1); half sc = scales[il]; #endif const uint16_t kmask1 = il>1 ? (il>2 ? 192 : 48) : (il>0 ? 12 : 3); const uint16_t kmask2 = il>1 ? 0xF0 : 0x0F; const half coef = il>1 ? 1.f/16.h : 1.h; const half ml = d_all * sc * 32.h; const half dl = d_all * sc * coef; for (int i = 0; i < 16; ++i) { const half q = il&1 ? ((ql[i] & kmask2) | ((qh[i] & kmask1) << 2)) : ((ql[i] & kmask2) | ((qh[i] & kmask1) << 4)); reg[i/4][i%4] = dl * q - ml; } } template<typename block_q, short nl, void (*dequantize_func)(device const block_q *, short, thread float4x4 &)> kernel void kernel_get_rows( device const void * src0, device const char * src1, device float * dst, constant int64_t & ne00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb1, constant uint64_t & nb2, uint3 tgpig[[threadgroup_position_in_grid]], uint tiitg[[thread_index_in_threadgroup]], uint3 tptg [[threads_per_threadgroup]]) { //const int64_t i = tgpig; //const int64_t r = ((device int32_t *) src1)[i]; const int64_t i10 = tgpig.x; const int64_t i11 = tgpig.y; const int64_t r = ((device int32_t *) ((device char *) src1 + i11*nb11 + i10*nb10))[0]; const int64_t i02 = i11; for (int64_t ind = tiitg; ind < ne00/16; ind += tptg.x) { float4x4 temp; dequantize_func( ((device const block_q *) ((device char *) src0 + r*nb01 + i02*nb02)) + ind/nl, ind%nl, temp); *(((device float4x4 *) ((device char *) dst + i11*nb2 + i10*nb1)) + ind) = temp; } } kernel void kernel_get_rows_f32( device const void * src0, device const char * src1, device float * dst, constant int64_t & ne00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb1, constant uint64_t & nb2, uint3 tgpig[[threadgroup_position_in_grid]], uint tiitg[[thread_index_in_threadgroup]], uint3 tptg [[threads_per_threadgroup]]) { const int64_t i10 = tgpig.x; const int64_t i11 = tgpig.y; const int64_t r = ((device int32_t *) ((device char *) src1 + i11*nb11 + i10*nb10))[0]; const int64_t i02 = i11; for (int ind = tiitg; ind < ne00; ind += tptg.x) { ((device float *) ((device char *) dst + i11*nb2 + i10*nb1))[ind] = ((device float *) ((device char *) src0 + r*nb01 + i02*nb02))[ind]; } } kernel void kernel_get_rows_f16( device const void * src0, device const char * src1, device float * dst, constant int64_t & ne00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb1, constant uint64_t & nb2, uint3 tgpig[[threadgroup_position_in_grid]], uint 
tiitg[[thread_index_in_threadgroup]], uint3 tptg [[threads_per_threadgroup]]) { const int64_t i10 = tgpig.x; const int64_t i11 = tgpig.y; const int64_t r = ((device int32_t *) ((device char *) src1 + i11*nb11 + i10*nb10))[0]; const int64_t i02 = i11; for (int ind = tiitg; ind < ne00; ind += tptg.x) { ((device float *) ((device char *) dst + i11*nb2 + i10*nb1))[ind] = ((device half *) ((device char *) src0 + r*nb01 + i02*nb02))[ind]; } } #define BLOCK_SIZE_M 64 // 8 simdgroup matrices from matrix A #define BLOCK_SIZE_N 32 // 4 simdgroup matrices from matrix B #define BLOCK_SIZE_K 32 #define THREAD_MAT_M 4 // each thread take 4 simdgroup matrices from matrix A #define THREAD_MAT_N 2 // each thread take 2 simdgroup matrices from matrix B #define THREAD_PER_BLOCK 128 #define THREAD_PER_ROW 2 // 2 thread for each row in matrix A to load numbers #define THREAD_PER_COL 4 // 4 thread for each row in matrix B to load numbers #define SG_MAT_SIZE 64 // simdgroup matrix is of shape 8x8 #define SG_MAT_ROW 8 // each block_q contains 16*nl weights template<typename block_q, short nl, void (*dequantize_func)(device const block_q *, short, thread half4x4 &)> void kernel_mul_mm_impl(device const uchar * src0, device const uchar * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne02, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne12, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint & r2, constant uint & r3, threadgroup uchar * shared_memory [[threadgroup(0)]], uint3 tgpig[[threadgroup_position_in_grid]], uint tiitg[[thread_index_in_threadgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { threadgroup half * sa = (threadgroup half *)(shared_memory); threadgroup float * sb = (threadgroup float *)(shared_memory + 4096); const uint r0 = tgpig.y; const uint r1 = tgpig.x; const uint im = tgpig.z; // if this block is of 64x32 shape or smaller short n_rows = (ne0 - r0 * BLOCK_SIZE_M < BLOCK_SIZE_M) ? (ne0 - r0 * BLOCK_SIZE_M) : BLOCK_SIZE_M; short n_cols = (ne1 - r1 * BLOCK_SIZE_N < BLOCK_SIZE_N) ? (ne1 - r1 * BLOCK_SIZE_N) : BLOCK_SIZE_N; // a thread shouldn't load data outside of the matrix short thread_row = ((short)tiitg/THREAD_PER_ROW) < n_rows ? ((short)tiitg/THREAD_PER_ROW) : n_rows - 1; short thread_col = ((short)tiitg/THREAD_PER_COL) < n_cols ? 
((short)tiitg/THREAD_PER_COL) : n_cols - 1; simdgroup_half8x8 ma[4]; simdgroup_float8x8 mb[2]; simdgroup_float8x8 c_res[8]; for (int i = 0; i < 8; i++){ c_res[i] = make_filled_simdgroup_matrix<float, 8>(0.f); } short il = (tiitg % THREAD_PER_ROW); const uint i12 = im%ne12; const uint i13 = im/ne12; uint offset0 = (i12/r2)*nb02 + (i13/r3)*(nb02*ne02); ushort offset1 = il/nl; device const block_q * x = (device const block_q *)(src0 + (r0 * BLOCK_SIZE_M + thread_row) * nb01 + offset0) + offset1; device const float * y = (device const float *)(src1 + nb12 * im + nb11 * (r1 * BLOCK_SIZE_N + thread_col) + nb10 * (BLOCK_SIZE_K / THREAD_PER_COL * (tiitg % THREAD_PER_COL))); for (int loop_k = 0; loop_k < ne00; loop_k += BLOCK_SIZE_K) { // load data and store to threadgroup memory half4x4 temp_a; dequantize_func(x, il, temp_a); threadgroup_barrier(mem_flags::mem_threadgroup); #pragma unroll(16) for (int i = 0; i < 16; i++) { *(sa + SG_MAT_SIZE * ((tiitg / THREAD_PER_ROW / 8) \ + (tiitg % THREAD_PER_ROW) * 16 + (i / 8) * 8) \ + (tiitg / THREAD_PER_ROW) % 8 + (i & 7) * 8) = temp_a[i/4][i%4]; } *(threadgroup float2x4 *)(sb + (tiitg % THREAD_PER_COL) * 8 * 32 + 8 * (tiitg / THREAD_PER_COL)) = *((device float2x4 *)y); il = (il + 2 < nl) ? il + 2 : il % 2; x = (il < 2) ? x + (2+nl-1)/nl : x; y += BLOCK_SIZE_K; threadgroup_barrier(mem_flags::mem_threadgroup); // load matrices from threadgroup memory and conduct outer products threadgroup half * lsma = (sa + THREAD_MAT_M * SG_MAT_SIZE * (sgitg % 2)); threadgroup float * lsmb = (sb + THREAD_MAT_N * SG_MAT_SIZE * (sgitg / 2)); #pragma unroll(4) for (int ik = 0; ik < BLOCK_SIZE_K / 8; ik++) { #pragma unroll(4) for (int i = 0; i < 4; i++) { simdgroup_load(ma[i],lsma + SG_MAT_SIZE * i); } simdgroup_barrier(mem_flags::mem_none); #pragma unroll(2) for (int i = 0; i < 2; i++) { simdgroup_load(mb[i],lsmb + SG_MAT_SIZE * i); } lsma += BLOCK_SIZE_M / SG_MAT_ROW * SG_MAT_SIZE; lsmb += BLOCK_SIZE_N / SG_MAT_ROW * SG_MAT_SIZE; #pragma unroll(8) for (int i = 0; i < 8; i++){ simdgroup_multiply_accumulate(c_res[i], mb[i/4], ma[i%4], c_res[i]); } } } if ((r0 + 1) * BLOCK_SIZE_M <= ne0 && (r1 + 1) * BLOCK_SIZE_N <= ne1) { device float * C = dst + (BLOCK_SIZE_M * r0 + 32 * (sgitg & 1)) \ + (BLOCK_SIZE_N * r1 + 16 * (sgitg >> 1)) * ne0 + im*ne1*ne0; for (int i = 0; i < 8; i++) { simdgroup_store(c_res[i], C + 8 * (i%4) + 8 * ne0 * (i/4), ne0); } } else { // block is smaller than 64x32, we should avoid writing data outside of the matrix threadgroup_barrier(mem_flags::mem_threadgroup); threadgroup float * temp_str = ((threadgroup float *)shared_memory) \ + 32 * (sgitg&1) + (16 * (sgitg>>1)) * BLOCK_SIZE_M; for (int i = 0; i < 8; i++) { simdgroup_store(c_res[i], temp_str + 8 * (i%4) + 8 * BLOCK_SIZE_M * (i/4), BLOCK_SIZE_M); } threadgroup_barrier(mem_flags::mem_threadgroup); device float * C = dst + (BLOCK_SIZE_M * r0) + (BLOCK_SIZE_N * r1) * ne0 + im*ne1*ne0; if (sgitg == 0) { for (int i = 0; i < n_rows; i++) { for (int j = tiitg; j < n_cols; j += BLOCK_SIZE_N) { *(C + i + j * ne0) = *(temp_str + i + j * BLOCK_SIZE_M); } } } } } // same as kernel_mul_mm_impl, but src1 and dst are accessed via indices stored in src1ids template<typename block_q, short nl, void (*dequantize_func)(device const block_q *, short, thread half4x4 &)> void kernel_mul_mm_id_impl( device const uchar * src0, device const uchar * src1, thread short * src1ids, device float * dst, constant int64_t & ne00, constant int64_t & ne02, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne12, 
constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, int64_t ne1, constant uint & r2, constant uint & r3, threadgroup uchar * shared_memory, uint3 tgpig[[threadgroup_position_in_grid]], uint tiitg[[thread_index_in_threadgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { threadgroup half * sa = (threadgroup half *)(shared_memory); threadgroup float * sb = (threadgroup float *)(shared_memory + 4096); const uint r0 = tgpig.y; const uint r1 = tgpig.x; const uint im = tgpig.z; if (r1 * BLOCK_SIZE_N >= ne1) return; // if this block is of 64x32 shape or smaller short n_rows = (ne0 - r0 * BLOCK_SIZE_M < BLOCK_SIZE_M) ? (ne0 - r0 * BLOCK_SIZE_M) : BLOCK_SIZE_M; short n_cols = (ne1 - r1 * BLOCK_SIZE_N < BLOCK_SIZE_N) ? (ne1 - r1 * BLOCK_SIZE_N) : BLOCK_SIZE_N; // a thread shouldn't load data outside of the matrix short thread_row = ((short)tiitg/THREAD_PER_ROW) < n_rows ? ((short)tiitg/THREAD_PER_ROW) : n_rows - 1; short thread_col = ((short)tiitg/THREAD_PER_COL) < n_cols ? ((short)tiitg/THREAD_PER_COL) : n_cols - 1; simdgroup_half8x8 ma[4]; simdgroup_float8x8 mb[2]; simdgroup_float8x8 c_res[8]; for (int i = 0; i < 8; i++){ c_res[i] = make_filled_simdgroup_matrix<float, 8>(0.f); } short il = (tiitg % THREAD_PER_ROW); const uint i12 = im%ne12; const uint i13 = im/ne12; uint offset0 = (i12/r2)*nb02 + (i13/r3)*(nb02*ne02); ushort offset1 = il/nl; device const block_q * x = (device const block_q *)(src0 + (r0 * BLOCK_SIZE_M + thread_row) * nb01 + offset0) + offset1; device const float * y = (device const float *)(src1 + nb12 * im + nb11 * src1ids[r1 * BLOCK_SIZE_N + thread_col] + nb10 * (BLOCK_SIZE_K / THREAD_PER_COL * (tiitg % THREAD_PER_COL))); for (int loop_k = 0; loop_k < ne00; loop_k += BLOCK_SIZE_K) { // load data and store to threadgroup memory half4x4 temp_a; dequantize_func(x, il, temp_a); threadgroup_barrier(mem_flags::mem_threadgroup); for (int i = 0; i < 16; i++) { *(sa + SG_MAT_SIZE * ((tiitg / THREAD_PER_ROW / 8) \ + (tiitg % THREAD_PER_ROW) * 16 + (i / 8) * 8) \ + (tiitg / THREAD_PER_ROW) % 8 + (i & 7) * 8) = temp_a[i/4][i%4]; } *(threadgroup float2x4 *)(sb + (tiitg % THREAD_PER_COL) * 8 * 32 + 8 * (tiitg / THREAD_PER_COL)) = *((device float2x4 *)y); il = (il + 2 < nl) ? il + 2 : il % 2; x = (il < 2) ? 
x + (2+nl-1)/nl : x; y += BLOCK_SIZE_K; threadgroup_barrier(mem_flags::mem_threadgroup); // load matrices from threadgroup memory and conduct outer products threadgroup half * lsma = (sa + THREAD_MAT_M * SG_MAT_SIZE * (sgitg % 2)); threadgroup float * lsmb = (sb + THREAD_MAT_N * SG_MAT_SIZE * (sgitg / 2)); for (int ik = 0; ik < BLOCK_SIZE_K / 8; ik++) { for (int i = 0; i < 4; i++) { simdgroup_load(ma[i],lsma + SG_MAT_SIZE * i); } simdgroup_barrier(mem_flags::mem_none); for (int i = 0; i < 2; i++) { simdgroup_load(mb[i],lsmb + SG_MAT_SIZE * i); } lsma += BLOCK_SIZE_M / SG_MAT_ROW * SG_MAT_SIZE; lsmb += BLOCK_SIZE_N / SG_MAT_ROW * SG_MAT_SIZE; for (int i = 0; i < 8; i++){ simdgroup_multiply_accumulate(c_res[i], mb[i/4], ma[i%4], c_res[i]); } } } { threadgroup_barrier(mem_flags::mem_threadgroup); threadgroup float * temp_str = ((threadgroup float *)shared_memory) \ + 32 * (sgitg&1) + (16 * (sgitg>>1)) * BLOCK_SIZE_M; for (int i = 0; i < 8; i++) { simdgroup_store(c_res[i], temp_str + 8 * (i%4) + 8 * BLOCK_SIZE_M * (i/4), BLOCK_SIZE_M); } threadgroup_barrier(mem_flags::mem_threadgroup); device float * C = dst + (BLOCK_SIZE_M * r0) + im*ne1*ne0; if (sgitg == 0) { for (int i = 0; i < n_rows; i++) { for (int j = tiitg; j < n_cols; j += BLOCK_SIZE_N) { *(C + i + src1ids[j + r1*BLOCK_SIZE_N] * ne0) = *(temp_str + i + j * BLOCK_SIZE_M); } } } } } template<typename block_q, short nl, void (*dequantize_func)(device const block_q *, short, thread half4x4 &)> kernel void kernel_mul_mm(device const uchar * src0, device const uchar * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne02, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne12, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint & r2, constant uint & r3, threadgroup uchar * shared_memory [[threadgroup(0)]], uint3 tgpig[[threadgroup_position_in_grid]], uint tiitg[[thread_index_in_threadgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { kernel_mul_mm_impl<block_q, nl, dequantize_func>( src0, src1, dst, ne00, ne02, nb01, nb02, ne12, nb10, nb11, nb12, ne0, ne1, r2, r3, shared_memory, tgpig, tiitg, sgitg); } template<typename block_q, short nl, void (*dequantize_func)(device const block_q *, short, thread half4x4 &)> kernel void kernel_mul_mm_id( device const uchar * ids, device const uchar * src1, device float * dst, constant uint64_t & nbi1, constant int64_t & ne00, constant int64_t & ne02, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne12, constant int64_t & ne13, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint64_t & nb1, constant uint & r2, constant uint & r3, constant int & idx, device const uchar * src00, device const uchar * src01, device const uchar * src02, device const uchar * src03, device const uchar * src04, device const uchar * src05, device const uchar * src06, device const uchar * src07, threadgroup uchar * shared_memory [[threadgroup(0)]], uint3 tgpig[[threadgroup_position_in_grid]], uint tiitg[[thread_index_in_threadgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { device const uchar * src0s[8] = {src00, src01, src02, src03, src04, src05, src06, src07}; // expert id const int32_t id = tgpig.z/(ne12*ne13); tgpig.z = tgpig.z%(ne12*ne13); // row indices of src1 for expert id int64_t _ne1 = 0; short src1ids[512]; for (int64_t i1 = 0; i1 < ne1; i1++) { if (((device int32_t *) 
(ids + i1*nbi1))[idx] == id) { src1ids[_ne1++] = i1; } } kernel_mul_mm_id_impl<block_q, nl, dequantize_func>( src0s[id], src1, src1ids, dst, ne00, ne02, nb01, nb02, ne12, nb10, nb11, nb12, ne0, _ne1, r2, r3, shared_memory, tgpig, tiitg, sgitg); } #if QK_K == 256 #define QK_NL 16 #else #define QK_NL 4 #endif // // get rows // typedef void (get_rows_t)( device const void * src0, device const char * src1, device float * dst, constant int64_t & ne00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb1, constant uint64_t & nb2, uint3, uint, uint3); //template [[host_name("kernel_get_rows_f32")]] kernel get_rows_t kernel_get_rows<float4x4, 1, dequantize_f32>; //template [[host_name("kernel_get_rows_f16")]] kernel get_rows_t kernel_get_rows<half4x4, 1, dequantize_f16>; template [[host_name("kernel_get_rows_q4_0")]] kernel get_rows_t kernel_get_rows<block_q4_0, 2, dequantize_q4_0>; template [[host_name("kernel_get_rows_q4_1")]] kernel get_rows_t kernel_get_rows<block_q4_1, 2, dequantize_q4_1>; template [[host_name("kernel_get_rows_q5_0")]] kernel get_rows_t kernel_get_rows<block_q5_0, 2, dequantize_q5_0>; template [[host_name("kernel_get_rows_q5_1")]] kernel get_rows_t kernel_get_rows<block_q5_1, 2, dequantize_q5_1>; template [[host_name("kernel_get_rows_q8_0")]] kernel get_rows_t kernel_get_rows<block_q8_0, 2, dequantize_q8_0>; template [[host_name("kernel_get_rows_q2_K")]] kernel get_rows_t kernel_get_rows<block_q2_K, QK_NL, dequantize_q2_K>; template [[host_name("kernel_get_rows_q3_K")]] kernel get_rows_t kernel_get_rows<block_q3_K, QK_NL, dequantize_q3_K>; template [[host_name("kernel_get_rows_q4_K")]] kernel get_rows_t kernel_get_rows<block_q4_K, QK_NL, dequantize_q4_K>; template [[host_name("kernel_get_rows_q5_K")]] kernel get_rows_t kernel_get_rows<block_q5_K, QK_NL, dequantize_q5_K>; template [[host_name("kernel_get_rows_q6_K")]] kernel get_rows_t kernel_get_rows<block_q6_K, QK_NL, dequantize_q6_K>; // // matrix-matrix multiplication // typedef void (mat_mm_t)( device const uchar * src0, device const uchar * src1, device float * dst, constant int64_t & ne00, constant int64_t & ne02, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne12, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint & r2, constant uint & r3, threadgroup uchar *, uint3, uint, uint); template [[host_name("kernel_mul_mm_f32_f32")]] kernel mat_mm_t kernel_mul_mm<float4x4, 1, dequantize_f32>; template [[host_name("kernel_mul_mm_f16_f32")]] kernel mat_mm_t kernel_mul_mm<half4x4, 1, dequantize_f16>; template [[host_name("kernel_mul_mm_q4_0_f32")]] kernel mat_mm_t kernel_mul_mm<block_q4_0, 2, dequantize_q4_0>; template [[host_name("kernel_mul_mm_q4_1_f32")]] kernel mat_mm_t kernel_mul_mm<block_q4_1, 2, dequantize_q4_1>; template [[host_name("kernel_mul_mm_q5_0_f32")]] kernel mat_mm_t kernel_mul_mm<block_q5_0, 2, dequantize_q5_0>; template [[host_name("kernel_mul_mm_q5_1_f32")]] kernel mat_mm_t kernel_mul_mm<block_q5_1, 2, dequantize_q5_1>; template [[host_name("kernel_mul_mm_q8_0_f32")]] kernel mat_mm_t kernel_mul_mm<block_q8_0, 2, dequantize_q8_0>; template [[host_name("kernel_mul_mm_q2_K_f32")]] kernel mat_mm_t kernel_mul_mm<block_q2_K, QK_NL, dequantize_q2_K>; template [[host_name("kernel_mul_mm_q3_K_f32")]] kernel mat_mm_t kernel_mul_mm<block_q3_K, QK_NL, dequantize_q3_K>; template 
[[host_name("kernel_mul_mm_q4_K_f32")]] kernel mat_mm_t kernel_mul_mm<block_q4_K, QK_NL, dequantize_q4_K>; template [[host_name("kernel_mul_mm_q5_K_f32")]] kernel mat_mm_t kernel_mul_mm<block_q5_K, QK_NL, dequantize_q5_K>; template [[host_name("kernel_mul_mm_q6_K_f32")]] kernel mat_mm_t kernel_mul_mm<block_q6_K, QK_NL, dequantize_q6_K>; // // indirect matrix-matrix multiplication // typedef void (mat_mm_id_t)( device const uchar * ids, device const uchar * src1, device float * dst, constant uint64_t & nbi1, constant int64_t & ne00, constant int64_t & ne02, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne12, constant int64_t & ne13, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint64_t & nb1, constant uint & r2, constant uint & r3, constant int & idx, device const uchar * src00, device const uchar * src01, device const uchar * src02, device const uchar * src03, device const uchar * src04, device const uchar * src05, device const uchar * src06, device const uchar * src07, threadgroup uchar *, uint3, uint, uint); template [[host_name("kernel_mul_mm_id_f32_f32")]] kernel mat_mm_id_t kernel_mul_mm_id<float4x4, 1, dequantize_f32>; template [[host_name("kernel_mul_mm_id_f16_f32")]] kernel mat_mm_id_t kernel_mul_mm_id<half4x4, 1, dequantize_f16>; template [[host_name("kernel_mul_mm_id_q4_0_f32")]] kernel mat_mm_id_t kernel_mul_mm_id<block_q4_0, 2, dequantize_q4_0>; template [[host_name("kernel_mul_mm_id_q4_1_f32")]] kernel mat_mm_id_t kernel_mul_mm_id<block_q4_1, 2, dequantize_q4_1>; template [[host_name("kernel_mul_mm_id_q5_0_f32")]] kernel mat_mm_id_t kernel_mul_mm_id<block_q5_0, 2, dequantize_q5_0>; template [[host_name("kernel_mul_mm_id_q5_1_f32")]] kernel mat_mm_id_t kernel_mul_mm_id<block_q5_1, 2, dequantize_q5_1>; template [[host_name("kernel_mul_mm_id_q8_0_f32")]] kernel mat_mm_id_t kernel_mul_mm_id<block_q8_0, 2, dequantize_q8_0>; template [[host_name("kernel_mul_mm_id_q2_K_f32")]] kernel mat_mm_id_t kernel_mul_mm_id<block_q2_K, QK_NL, dequantize_q2_K>; template [[host_name("kernel_mul_mm_id_q3_K_f32")]] kernel mat_mm_id_t kernel_mul_mm_id<block_q3_K, QK_NL, dequantize_q3_K>; template [[host_name("kernel_mul_mm_id_q4_K_f32")]] kernel mat_mm_id_t kernel_mul_mm_id<block_q4_K, QK_NL, dequantize_q4_K>; template [[host_name("kernel_mul_mm_id_q5_K_f32")]] kernel mat_mm_id_t kernel_mul_mm_id<block_q5_K, QK_NL, dequantize_q5_K>; template [[host_name("kernel_mul_mm_id_q6_K_f32")]] kernel mat_mm_id_t kernel_mul_mm_id<block_q6_K, QK_NL, dequantize_q6_K>; // // matrix-vector multiplication // [[host_name("kernel_mul_mv_id_f32_f32")]] kernel void kernel_mul_mv_id_f32_f32( device const char * ids, device const char * src1, device float * dst, constant uint64_t & nbi1, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant int64_t & ne13, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint64_t & nb1, constant uint & r2, constant uint & r3, constant int & idx, device const char * src00, device const char * src01, device const char * src02, device const char * src03, device const char * src04, device const char * src05, device const char * src06, device const char * src07, uint3 tgpig[[threadgroup_position_in_grid]], uint 
tiitg[[thread_index_in_threadgroup]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { device const char * src0[8] = {src00, src01, src02, src03, src04, src05, src06, src07}; const int64_t bid = tgpig.z/(ne12*ne13); tgpig.z = tgpig.z%(ne12*ne13); const int32_t id = ((device int32_t *) (ids + bid*nbi1))[idx]; kernel_mul_mv_f32_f32_impl( src0[id], src1 + bid*nb11, dst + bid*ne0, ne00, ne01, ne02, nb00, nb01, nb02, ne10, ne11, ne12, nb10, nb11, nb12, ne0, ne1, r2, r3, tgpig, tiisg); } [[host_name("kernel_mul_mv_id_f16_f32")]] kernel void kernel_mul_mv_id_f16_f32( device const char * ids, device const char * src1, device float * dst, constant uint64_t & nbi1, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant int64_t & ne13, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint64_t & nb1, constant uint & r2, constant uint & r3, constant int & idx, device const char * src00, device const char * src01, device const char * src02, device const char * src03, device const char * src04, device const char * src05, device const char * src06, device const char * src07, uint3 tgpig[[threadgroup_position_in_grid]], uint tiitg[[thread_index_in_threadgroup]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { device const char * src0[8] = {src00, src01, src02, src03, src04, src05, src06, src07}; const int64_t bid = tgpig.z/(ne12*ne13); tgpig.z = tgpig.z%(ne12*ne13); const int32_t id = ((device int32_t *) (ids + bid*nbi1))[idx]; kernel_mul_mv_f16_f32_impl( src0[id], src1 + bid*nb11, dst + bid*ne0, ne00, ne01, ne02, nb00, nb01, nb02, ne10, ne11, ne12, nb10, nb11, nb12, ne0, ne1, r2, r3, tgpig, tiisg); } [[host_name("kernel_mul_mv_id_q8_0_f32")]] kernel void kernel_mul_mv_id_q8_0_f32( device const char * ids, device const char * src1, device float * dst, constant uint64_t & nbi1, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant int64_t & ne13, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint64_t & nb1, constant uint & r2, constant uint & r3, constant int & idx, device const char * src00, device const char * src01, device const char * src02, device const char * src03, device const char * src04, device const char * src05, device const char * src06, device const char * src07, uint3 tgpig[[threadgroup_position_in_grid]], uint tiitg[[thread_index_in_threadgroup]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { device const char * src0[8] = {src00, src01, src02, src03, src04, src05, src06, src07}; const int64_t bid = tgpig.z/(ne12*ne13); tgpig.z = tgpig.z%(ne12*ne13); const int32_t id = ((device int32_t *) (ids + bid*nbi1))[idx]; kernel_mul_mv_q8_0_f32_impl( src0[id], (device const float *) (src1 + bid*nb11), dst + bid*ne0, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3, tgpig, tiisg, sgitg); } [[host_name("kernel_mul_mv_id_q4_0_f32")]] kernel void kernel_mul_mv_id_q4_0_f32( device const char * ids, device const char * src1, device float * dst, constant 
uint64_t & nbi1, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant int64_t & ne13, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint64_t & nb1, constant uint & r2, constant uint & r3, constant int & idx, device const char * src00, device const char * src01, device const char * src02, device const char * src03, device const char * src04, device const char * src05, device const char * src06, device const char * src07, uint3 tgpig[[threadgroup_position_in_grid]], uint tiitg[[thread_index_in_threadgroup]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { device const char * src0[8] = {src00, src01, src02, src03, src04, src05, src06, src07}; const int64_t bid = tgpig.z/(ne12*ne13); tgpig.z = tgpig.z%(ne12*ne13); const int32_t id = ((device int32_t *) (ids + bid*nbi1))[idx]; mul_vec_q_n_f32_impl<block_q4_0, N_DST, N_SIMDGROUP, N_SIMDWIDTH>( src0[id], (device const float *) (src1 + bid*nb11), dst + bid*ne0, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3, tgpig, tiisg, sgitg); } [[host_name("kernel_mul_mv_id_q4_1_f32")]] kernel void kernel_mul_mv_id_q4_1_f32( device const char * ids, device const char * src1, device float * dst, constant uint64_t & nbi1, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant int64_t & ne13, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint64_t & nb1, constant uint & r2, constant uint & r3, constant int & idx, device const char * src00, device const char * src01, device const char * src02, device const char * src03, device const char * src04, device const char * src05, device const char * src06, device const char * src07, uint3 tgpig[[threadgroup_position_in_grid]], uint tiitg[[thread_index_in_threadgroup]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { device const char * src0[8] = {src00, src01, src02, src03, src04, src05, src06, src07}; const int64_t bid = tgpig.z/(ne12*ne13); tgpig.z = tgpig.z%(ne12*ne13); const int32_t id = ((device int32_t *) (ids + bid*nbi1))[idx]; mul_vec_q_n_f32_impl<block_q4_1, N_DST, N_SIMDGROUP, N_SIMDWIDTH>( src0[id], (device const float *) (src1 + bid*nb11), dst + bid*ne0, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3, tgpig, tiisg, sgitg); } [[host_name("kernel_mul_mv_id_q5_0_f32")]] kernel void kernel_mul_mv_id_q5_0_f32( device const char * ids, device const char * src1, device float * dst, constant uint64_t & nbi1, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant int64_t & ne13, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint64_t & nb1, constant uint & r2, constant uint & r3, constant int & idx, device const char * src00, device const char * src01, device const char * src02, device const char * src03, device const char * src04, device 
const char * src05, device const char * src06, device const char * src07, uint3 tgpig[[threadgroup_position_in_grid]], uint tiitg[[thread_index_in_threadgroup]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { device const char * src0[8] = {src00, src01, src02, src03, src04, src05, src06, src07}; const int64_t bid = tgpig.z/(ne12*ne13); tgpig.z = tgpig.z%(ne12*ne13); const int32_t id = ((device int32_t *) (ids + bid*nbi1))[idx]; mul_vec_q_n_f32_impl<block_q5_0, N_DST, N_SIMDGROUP, N_SIMDWIDTH>( src0[id], (device const float *) (src1 + bid*nb11), dst + bid*ne0, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3, tgpig, tiisg, sgitg); } [[host_name("kernel_mul_mv_id_q5_1_f32")]] kernel void kernel_mul_mv_id_q5_1_f32( device const char * ids, device const char * src1, device float * dst, constant uint64_t & nbi1, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant int64_t & ne13, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint64_t & nb1, constant uint & r2, constant uint & r3, constant int & idx, device const char * src00, device const char * src01, device const char * src02, device const char * src03, device const char * src04, device const char * src05, device const char * src06, device const char * src07, uint3 tgpig[[threadgroup_position_in_grid]], uint tiitg[[thread_index_in_threadgroup]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { device const char * src0[8] = {src00, src01, src02, src03, src04, src05, src06, src07}; const int64_t bid = tgpig.z/(ne12*ne13); tgpig.z = tgpig.z%(ne12*ne13); const int32_t id = ((device int32_t *) (ids + bid*nbi1))[idx]; mul_vec_q_n_f32_impl<block_q5_1, N_DST, N_SIMDGROUP, N_SIMDWIDTH>( src0[id], (device const float *) (src1 + bid*nb11), dst + bid*ne0, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3, tgpig, tiisg, sgitg); } [[host_name("kernel_mul_mv_id_q2_K_f32")]] kernel void kernel_mul_mv_id_q2_K_f32( device const char * ids, device const char * src1, device float * dst, constant uint64_t & nbi1, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant int64_t & ne13, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint64_t & nb1, constant uint & r2, constant uint & r3, constant int & idx, device const char * src00, device const char * src01, device const char * src02, device const char * src03, device const char * src04, device const char * src05, device const char * src06, device const char * src07, uint3 tgpig[[threadgroup_position_in_grid]], uint tiitg[[thread_index_in_threadgroup]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { device const char * src0[8] = {src00, src01, src02, src03, src04, src05, src06, src07}; const int64_t bid = tgpig.z/(ne12*ne13); tgpig.z = tgpig.z%(ne12*ne13); const int32_t id = ((device int32_t *) (ids + bid*nbi1))[idx]; kernel_mul_mv_q2_K_f32_impl( src0[id], (device const float *) (src1 + bid*nb11), dst + bid*ne0, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3, tgpig, 
tiisg, sgitg); } [[host_name("kernel_mul_mv_id_q3_K_f32")]] kernel void kernel_mul_mv_id_q3_K_f32( device const char * ids, device const char * src1, device float * dst, constant uint64_t & nbi1, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant int64_t & ne13, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint64_t & nb1, constant uint & r2, constant uint & r3, constant int & idx, device const char * src00, device const char * src01, device const char * src02, device const char * src03, device const char * src04, device const char * src05, device const char * src06, device const char * src07, uint3 tgpig[[threadgroup_position_in_grid]], uint tiitg[[thread_index_in_threadgroup]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { device const char * src0[8] = {src00, src01, src02, src03, src04, src05, src06, src07}; const int64_t bid = tgpig.z/(ne12*ne13); tgpig.z = tgpig.z%(ne12*ne13); const int32_t id = ((device int32_t *) (ids + bid*nbi1))[idx]; kernel_mul_mv_q3_K_f32_impl( src0[id], (device const float *) (src1 + bid*nb11), dst + bid*ne0, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3, tgpig, tiisg, sgitg); } [[host_name("kernel_mul_mv_id_q4_K_f32")]] kernel void kernel_mul_mv_id_q4_K_f32( device const char * ids, device const char * src1, device float * dst, constant uint64_t & nbi1, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant int64_t & ne13, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint64_t & nb1, constant uint & r2, constant uint & r3, constant int & idx, device const char * src00, device const char * src01, device const char * src02, device const char * src03, device const char * src04, device const char * src05, device const char * src06, device const char * src07, uint3 tgpig[[threadgroup_position_in_grid]], uint tiitg[[thread_index_in_threadgroup]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { device const char * src0[8] = {src00, src01, src02, src03, src04, src05, src06, src07}; const int64_t bid = tgpig.z/(ne12*ne13); tgpig.z = tgpig.z%(ne12*ne13); const int32_t id = ((device int32_t *) (ids + bid*nbi1))[idx]; kernel_mul_mv_q4_K_f32_impl( src0[id], (device const float *) (src1 + bid*nb11), dst + bid*ne0, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3, tgpig, tiisg, sgitg); } [[host_name("kernel_mul_mv_id_q5_K_f32")]] kernel void kernel_mul_mv_id_q5_K_f32( device const char * ids, device const char * src1, device float * dst, constant uint64_t & nbi1, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant int64_t & ne13, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint64_t & nb1, constant uint & r2, constant uint & r3, constant int & idx, device const char * src00, device const 
char * src01, device const char * src02, device const char * src03, device const char * src04, device const char * src05, device const char * src06, device const char * src07, uint3 tgpig[[threadgroup_position_in_grid]], uint tiitg[[thread_index_in_threadgroup]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { device const char * src0[8] = {src00, src01, src02, src03, src04, src05, src06, src07}; const int64_t bid = tgpig.z/(ne12*ne13); tgpig.z = tgpig.z%(ne12*ne13); const int32_t id = ((device int32_t *) (ids + bid*nbi1))[idx]; kernel_mul_mv_q5_K_f32_impl( src0[id], (device const float *) (src1 + bid*nb11), dst + bid*ne0, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3, tgpig, tiisg, sgitg); } [[host_name("kernel_mul_mv_id_q6_K_f32")]] kernel void kernel_mul_mv_id_q6_K_f32( device const char * ids, device const char * src1, device float * dst, constant uint64_t & nbi1, constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant int64_t & ne13, constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant int64_t & ne0, constant int64_t & ne1, constant uint64_t & nb1, constant uint & r2, constant uint & r3, constant int & idx, device const char * src00, device const char * src01, device const char * src02, device const char * src03, device const char * src04, device const char * src05, device const char * src06, device const char * src07, uint3 tgpig[[threadgroup_position_in_grid]], uint tiitg[[thread_index_in_threadgroup]], uint tiisg[[thread_index_in_simdgroup]], uint sgitg[[simdgroup_index_in_threadgroup]]) { device const char * src0[8] = {src00, src01, src02, src03, src04, src05, src06, src07}; const int64_t bid = tgpig.z/(ne12*ne13); tgpig.z = tgpig.z%(ne12*ne13); const int32_t id = ((device int32_t *) (ids + bid*nbi1))[idx]; kernel_mul_mv_q6_K_f32_impl( src0[id], (device const float *) (src1 + bid*nb11), dst + bid*ne0, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3, tgpig, tiisg, sgitg); }
candle/candle-metal-kernels/src/quantized.metal/0
{ "file_path": "candle/candle-metal-kernels/src/quantized.metal", "repo_id": "candle", "token_count": 97299 }
//! Cache Implementations //! use candle::{Device, Result, Tensor}; #[derive(Debug, Clone)] pub struct Cache { // all_data is an option on a Tensor, this makes it possible to only create the actual tensor // on the first call where the batch size is easily known. // Also this makes it safe to clone a KvCache that has been reseted (as in it will not share // its internal state with the cloned instance). all_data: Option<Tensor>, dim: usize, current_seq_len: usize, max_seq_len: usize, } impl Cache { pub fn new(dim: usize, max_seq_len: usize) -> Self { Self { all_data: None, dim, current_seq_len: 0, max_seq_len, } } pub fn dim(&self) -> usize { self.dim } pub fn current_seq_len(&self) -> usize { self.current_seq_len } pub fn max_seq_len(&self) -> usize { self.max_seq_len } pub fn all_data(&self) -> &Option<Tensor> { &self.all_data } pub fn current_data(&self) -> Result<Option<Tensor>> { let data = match self.all_data.as_ref() { None => None, Some(d) => Some(d.narrow(self.dim, 0, self.current_seq_len)?), }; Ok(data) } pub fn reset(&mut self) { self.current_seq_len = 0; self.all_data = None; } pub fn append(&mut self, src: &Tensor) -> Result<()> { let seq_len = src.dim(self.dim)?; // This doesn't seem very idiomatic but because the creation can fail, it's tricky to use // self.all_data.get_or_insert_with. if self.all_data.is_none() { let mut shape = src.dims().to_vec(); shape[self.dim] = self.max_seq_len; let ad = Tensor::zeros(shape, src.dtype(), src.device())?; self.all_data = Some(ad) }; let ad = self.all_data.as_mut().unwrap(); if self.current_seq_len + seq_len > self.max_seq_len { candle::bail!( "kv-cache: above max-seq-len {}+{seq_len}>{}", self.current_seq_len, self.max_seq_len ) } ad.slice_set(src, self.dim, self.current_seq_len)?; self.current_seq_len += seq_len; Ok(()) } } #[derive(Debug, Clone)] pub struct KvCache { k: Cache, v: Cache, } impl KvCache { pub fn new(dim: usize, max_seq_len: usize) -> Self { let k = Cache::new(dim, max_seq_len); let v = Cache::new(dim, max_seq_len); Self { k, v } } pub fn k_cache(&self) -> &Cache { &self.k } pub fn v_cache(&self) -> &Cache { &self.v } pub fn k_cache_mut(&mut self) -> &mut Cache { &mut self.k } pub fn v_cache_mut(&mut self) -> &mut Cache { &mut self.v } pub fn k(&self) -> Result<Option<Tensor>> { self.k.current_data() } pub fn v(&self) -> Result<Option<Tensor>> { self.v.current_data() } pub fn append(&mut self, k: &Tensor, v: &Tensor) -> Result<(Tensor, Tensor)> { self.k.append(k)?; self.v.append(v)?; let out_k = self.k.current_data()?; let out_v = self.v.current_data()?; let k = match out_k { None => { let mut shape = k.dims().to_vec(); shape[self.k.dim] = 0; Tensor::zeros(shape, k.dtype(), k.device())? } Some(k) => k, }; let v = match out_v { None => { let mut shape = v.dims().to_vec(); shape[self.k.dim] = 0; Tensor::zeros(shape, v.dtype(), v.device())? } Some(v) => v, }; Ok((k, v)) } pub fn current_seq_len(&self) -> usize { self.k.current_seq_len() } pub fn reset(&mut self) { self.k.reset(); self.v.reset(); } } #[derive(Debug, Clone)] pub struct RotatingCache { all_data: Option<Tensor>, dim: usize, // `offset` is the current write index in the buffer offset: usize, // The total size of the sequence seen so far. current_seq_len: usize, // max_seq_len is the size of the rotating buffer, it is actually allowed for the full // sequence to grow past this limit. 
max_seq_len: usize, } impl RotatingCache { pub fn new(dim: usize, max_seq_len: usize) -> Self { Self { all_data: None, dim, offset: 0, current_seq_len: 0, max_seq_len, } } pub fn offset(&self) -> usize { self.offset } pub fn dim(&self) -> usize { self.dim } pub fn current_seq_len(&self) -> usize { self.current_seq_len } pub fn max_seq_len(&self) -> usize { self.max_seq_len } pub fn all_data(&self) -> &Option<Tensor> { &self.all_data } pub fn current_data(&self) -> Result<Option<Tensor>> { let data = match self.all_data.as_ref() { None => None, Some(d) => { if self.current_seq_len >= self.max_seq_len { Some(d.clone()) } else { Some(d.narrow(self.dim, 0, self.current_seq_len)?) } } }; Ok(data) } pub fn reset(&mut self) { self.offset = 0; self.current_seq_len = 0; self.all_data = None; } pub fn append(&mut self, src: &Tensor) -> Result<Tensor> { let seq_len = src.dim(self.dim)?; // This doesn't seem very idiomatic but because the creation can fail, it's tricky to use // self.all_data.get_or_insert_with. if self.all_data.is_none() { let mut shape = src.dims().to_vec(); shape[self.dim] = self.max_seq_len; let ad = Tensor::zeros(shape, src.dtype(), src.device())?; self.all_data = Some(ad) }; let ad = self.all_data.as_mut().unwrap(); self.current_seq_len += seq_len; if seq_len >= self.max_seq_len { let to_copy = src .narrow(self.dim, seq_len - self.max_seq_len, self.max_seq_len)? .contiguous()?; ad.slice_set(&to_copy, self.dim, 0)?; self.offset = 0; // Here we return `src` rather than `ad` so that all the past can be used. Ok(src.clone()) } else { let rem_len = self.max_seq_len - self.offset; if seq_len <= rem_len { ad.slice_set(&src.contiguous()?, self.dim, self.offset)?; self.offset = (self.offset + seq_len) % self.max_seq_len; } else { // We have to make two copies here as we go over the boundary of the cache. if rem_len > 0 { let src1 = src.narrow(self.dim, 0, rem_len)?.contiguous()?; ad.slice_set(&src1, self.dim, self.offset)?; } let src2 = src .narrow(self.dim, rem_len, seq_len - rem_len)? .contiguous()?; ad.slice_set(&src2, self.dim, 0)?; self.offset = seq_len - rem_len; } if self.current_seq_len >= self.max_seq_len { Ok(ad.clone()) } else { Ok(ad.narrow(self.dim, 0, self.current_seq_len)?) } } } fn get_mask_abs(&self, size1: usize, size2: usize, device: &Device) -> Result<Tensor> { let context = self.max_seq_len; let mask: Vec<_> = (0..size1) .flat_map(|i| { (0..size2).map(move |j| { u8::from(size1 + j > size2 + i || size1 + j + context < size2 + i) }) }) .collect(); Tensor::from_slice(&mask, (size1, size2), device) } fn get_mask_rel(&self, size1: usize, size2: usize, device: &Device) -> Result<Tensor> { let context = self.max_seq_len; let upd_offset = (self.offset + size1) % self.max_seq_len; let mask: Vec<_> = (0..size1) .flat_map(|pos_src| { // The absolute position of the elements that will get added to the cache. let pos_src = self.current_seq_len + pos_src; (0..size2).map(move |pos_cache_rel| { // The absolute position of the cache elements after the addition. let pos_cache = self.current_seq_len + size1 + pos_cache_rel - upd_offset; let pos_cache = if pos_cache_rel < upd_offset { pos_cache } else { pos_cache - self.max_seq_len }; u8::from(pos_cache > pos_src || pos_cache + context < pos_src) }) }) .collect(); Tensor::from_slice(&mask, (size1, size2), device) } /// Returns the attn_mask to be applied *after* adding `seq_len` to the cache. 
pub fn attn_mask(&self, seq_len: usize, device: &Device) -> Result<Option<Tensor>> { let mask = if seq_len == 1 { None } else { let mask = if seq_len < self.max_seq_len { let cache_out_len = (self.current_seq_len + seq_len).min(self.max_seq_len); self.get_mask_rel(seq_len, cache_out_len, device)? } else { self.get_mask_abs(seq_len, seq_len, device)? }; Some(mask) }; Ok(mask) } } #[derive(Debug, Clone)] pub struct RotatingKvCache { k: RotatingCache, v: RotatingCache, } impl RotatingKvCache { pub fn new(dim: usize, max_seq_len: usize) -> Self { let k = RotatingCache::new(dim, max_seq_len); let v = RotatingCache::new(dim, max_seq_len); Self { k, v } } pub fn k_cache(&self) -> &RotatingCache { &self.k } pub fn v_cache(&self) -> &RotatingCache { &self.v } pub fn k_cache_mut(&mut self) -> &mut RotatingCache { &mut self.k } pub fn v_cache_mut(&mut self) -> &mut RotatingCache { &mut self.v } pub fn k(&self) -> Result<Option<Tensor>> { self.k.current_data() } pub fn v(&self) -> Result<Option<Tensor>> { self.v.current_data() } pub fn append(&mut self, k: &Tensor, v: &Tensor) -> Result<(Tensor, Tensor)> { let out_k = self.k.append(k)?; let out_v = self.v.append(v)?; Ok((out_k, out_v)) } pub fn offset(&self) -> usize { self.k.offset() } pub fn current_seq_len(&self) -> usize { self.k.current_seq_len() } pub fn attn_mask(&self, seq_len: usize, device: &Device) -> Result<Option<Tensor>> { self.k.attn_mask(seq_len, device) } pub fn reset(&mut self) { self.k.reset(); self.v.reset(); } }
candle/candle-nn/src/kv_cache.rs/0
{ "file_path": "candle/candle-nn/src/kv_cache.rs", "repo_id": "candle", "token_count": 5654 }
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use candle::test_utils::to_vec0_round; use candle::{Device, Result, Tensor}; /* Equivalent python code: import torch import torch.nn.functional as F input = torch.tensor([ [ 1.1050, 0.3013, -1.5394, -2.1528, -0.8634], [ 1.0730, -0.9419, -0.1670, -0.6582, 0.5061], [ 0.8318, 1.1154, -0.3610, 0.5351, 1.0830]]) target = torch.tensor([1, 0, 4]) print(F.nll_loss(F.log_softmax(input, dim=1), target)) print(F.cross_entropy(input, target)) */ #[test] fn nll_and_cross_entropy() -> Result<()> { let cpu = Device::Cpu; let input = Tensor::new( &[ [1.1050f32, 0.3013, -1.5394, -2.1528, -0.8634], [1.0730, -0.9419, -0.1670, -0.6582, 0.5061], [0.8318, 1.1154, -0.3610, 0.5351, 1.0830], ], &cpu, )?; let target = Tensor::new(&[1u32, 0, 4], &cpu)?; let log_softmax = candle_nn::ops::log_softmax(&input, 1)?; let loss = candle_nn::loss::nll(&log_softmax, &target)?; assert_eq!(to_vec0_round(&loss, 4)?, 1.1312); let loss = candle_nn::loss::cross_entropy(&input, &target)?; assert_eq!(to_vec0_round(&loss, 4)?, 1.1312); Ok(()) } /* Equivalent python code: import torch import torch.nn.functional as F inp = torch.Tensor([[ 2.3611, -0.8813, -0.5006, -0.2178], [ 0.0419, 0.0763, -1.0457, -1.6692], [-1.0494, 0.8111, 1.5723, 1.2315], [ 1.3081, 0.6641, 1.1802, -0.2547], [ 0.5292, 0.7636, 0.3692, -0.8318]]) target = torch.Tensor([[0., 1., 0., 0.], [0., 1., 0., 0.], [0., 0., 0., 1.], [1., 0., 0., 0.], [0., 0., 1., 0.]]) print(F.binary_cross_entropy_with_logits(inp, target)) */ #[test] fn binary_cross_entropy_with_logit() -> Result<()> { let cpu = Device::Cpu; let inp = [ [2.3611f32, -0.8813, -0.5006, -0.2178], [0.0419, 0.0763, -1.0457, -1.6692], [-1.0494, 0.8111, 1.5723, 1.2315], [1.3081, 0.6641, 1.1802, -0.2547], [0.5292, 0.7636, 0.3692, -0.8318], ]; let target = [ [0.0f32, 1., 0., 0.], [0., 1., 0., 0.], [0., 0., 0., 1.], [1., 0., 0., 0.], [0., 0., 1., 0.], ]; let inp = Tensor::new(&inp, &cpu)?; let target = Tensor::new(&target, &cpu)?; let loss = candle_nn::loss::binary_cross_entropy_with_logit(&inp, &target)?; assert_eq!(to_vec0_round(&loss, 4)?, 0.8224); Ok(()) }
candle/candle-nn/tests/loss.rs/0
{ "file_path": "candle/candle-nn/tests/loss.rs", "repo_id": "candle", "token_count": 1344 }
from .module import Module
from typing import Optional, Tuple, Any
from candle import Tensor
import candle


class Embedding(Module):
    r"""A simple lookup table that stores embeddings of a fixed dictionary and size.

    This module is often used to store word embeddings and retrieve them using indices.
    The input to the module is a list of indices, and the output is the corresponding word embeddings.

    Args:
        num_embeddings (int): size of the dictionary of embeddings
        embedding_dim (int): the size of each embedding vector

    Attributes:
        weight (Tensor): the learnable weights of the module of shape (num_embeddings, embedding_dim)
            initialized from :math:`\mathcal{N}(0, 1)`

    Shape:
        - Input: :math:`(*)`, IntTensor or LongTensor of arbitrary shape containing the indices to extract
        - Output: :math:`(*, H)`, where `*` is the input shape and :math:`H=\text{embedding\_dim}`
    """

    def __init__(self, num_embeddings: int, embedding_dim: int, device=None) -> None:
        factory_kwargs = {"device": device}
        super().__init__()
        self.num_embeddings = num_embeddings
        self.embedding_dim = embedding_dim
        self.weight = candle.randn((num_embeddings, embedding_dim), **factory_kwargs)

    def forward(self, indexes: Tensor) -> Tensor:
        final_dims = list(indexes.shape)
        final_dims.append(self.embedding_dim)
        indexes = indexes.flatten_all()
        values = self.weight.index_select(indexes, 0)
        return values.reshape(final_dims)
candle/candle-pyo3/py_src/candle/nn/sparse.py/0
{ "file_path": "candle/candle-pyo3/py_src/candle/nn/sparse.py", "repo_id": "candle", "token_count": 590 }
//! Implementation of BLIP text encoder/decoder. //! //! - 📝 [Paper](https://arxiv.org/abs/2201.12086). BLIP: Bootstrapping Language-Image Pre-training for Unified Vision-Language Understanding and Generation" //! //! - ⚡ [Interactive Wasm Example](https://huggingface.co/spaces/radames/Candle-BLIP-Image-Captioning) //! - 💻 [GH Link](https://github.com/salesforce/BLIP) //! - 🤗 [HF Link](https://huggingface.co/Salesforce/blip-image-captioning-base) //! - 📝 [Paper](https://arxiv.org/abs/2201.12086) //! use super::with_tracing::{linear, Embedding, Linear}; use candle::{Module, Result, Tensor, D}; use candle_nn::{layer_norm, LayerNorm, VarBuilder}; use serde::Deserialize; #[derive(Debug, Clone, Deserialize)] pub struct Config { pub vocab_size: usize, pub hidden_size: usize, pub encoder_hidden_size: usize, pub intermediate_size: usize, pub projection_dim: usize, pub num_hidden_layers: usize, pub num_attention_heads: usize, pub max_position_embeddings: usize, pub hidden_act: candle_nn::Activation, pub layer_norm_eps: f64, pub is_decoder: bool, } #[derive(Debug, Clone)] struct TextEmbeddings { word_embedddings: Embedding, position_embeddings: Embedding, layer_norm: LayerNorm, position_ids: Tensor, } impl TextEmbeddings { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let word_embedddings = Embedding::new(cfg.vocab_size, cfg.hidden_size, vb.pp("word_embeddings"))?; let position_embeddings = Embedding::new( cfg.max_position_embeddings, cfg.hidden_size, vb.pp("position_embeddings"), )?; let layer_norm = layer_norm(cfg.hidden_size, cfg.layer_norm_eps, vb.pp("LayerNorm"))?; let position_ids = Tensor::arange(0, cfg.max_position_embeddings as u32, vb.device())?.unsqueeze(0)?; Ok(Self { word_embedddings, position_embeddings, layer_norm, position_ids, }) } fn forward(&self, xs: &Tensor, past_kv_len: usize) -> Result<Tensor> { let seq_len = xs.dim(1)?; let position_ids = self.position_ids.narrow(1, past_kv_len, seq_len)?; let embeddings = self.word_embedddings.forward(xs)?; let position_embeddings = self.position_embeddings.forward(&position_ids)?; (embeddings + position_embeddings)?.apply(&self.layer_norm) } } #[derive(Debug, Clone)] struct TextSelfAttention { query: Linear, key: Linear, value: Linear, attention_head_size: usize, num_attention_heads: usize, attention_scale: f64, kv_cache: Option<(Tensor, Tensor)>, } impl TextSelfAttention { fn new(cfg: &Config, is_cross_attention: bool, vb: VarBuilder) -> Result<Self> { let num_attention_heads = cfg.num_attention_heads; let attention_head_size = cfg.hidden_size / num_attention_heads; let all_head_size = cfg.num_attention_heads * attention_head_size; let query = linear(cfg.hidden_size, all_head_size, vb.pp("query"))?; let in_size = if is_cross_attention { cfg.encoder_hidden_size } else { cfg.hidden_size }; let key = linear(in_size, all_head_size, vb.pp("key"))?; let value = linear(in_size, all_head_size, vb.pp("value"))?; let attention_scale = 1f64 / (attention_head_size as f64).sqrt(); Ok(Self { query, key, value, attention_head_size, num_attention_heads, attention_scale, kv_cache: None, }) } fn transpose_for_scores(&self, xs: &Tensor) -> Result<Tensor> { let (b_size, seq_len, _) = xs.dims3()?; xs.reshape(( b_size, seq_len, self.num_attention_heads, self.attention_head_size, ))? 
.permute((0, 2, 1, 3)) } fn reset_kv_cache(&mut self) { self.kv_cache = None } fn forward( &mut self, xs: &Tensor, encoder_hidden_states: Option<&Tensor>, attention_mask: Option<&Tensor>, ) -> Result<Tensor> { let query = self .transpose_for_scores(&self.query.forward(xs)?)? .contiguous()?; let (key, value) = match encoder_hidden_states { None => { let key = self.transpose_for_scores(&self.key.forward(xs)?)?; let value = self.transpose_for_scores(&self.value.forward(xs)?)?; let (key, value) = match &self.kv_cache { None => (key, value), Some((prev_key, prev_value)) => { let key = Tensor::cat(&[prev_key, &key], 2)?; let value = Tensor::cat(&[prev_value, &value], 2)?; (key, value) } }; self.kv_cache = Some((key.clone(), value.clone())); (key, value) } Some(xs) => { let key = self.transpose_for_scores(&self.key.forward(xs)?)?; let value = self.transpose_for_scores(&self.value.forward(xs)?)?; // no kv-cache in this case, but the results could probably be memoized. (key, value) } }; let key = key.contiguous()?; let value = value.contiguous()?; let attention_scores = query.matmul(&key.t()?)?; let attention_scores = (attention_scores * self.attention_scale)?; let attention_scores = match attention_mask { Some(mask) => attention_scores.broadcast_add(mask)?, None => attention_scores, }; let attention_probs = candle_nn::ops::softmax_last_dim(&attention_scores)?; attention_probs .matmul(&value)? .permute((0, 2, 1, 3))? .flatten_from(D::Minus2) } } #[derive(Debug, Clone)] struct TextSelfOutput { dense: Linear, layer_norm: LayerNorm, } impl TextSelfOutput { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let dense = linear(cfg.hidden_size, cfg.hidden_size, vb.pp("dense"))?; let layer_norm = layer_norm(cfg.hidden_size, cfg.layer_norm_eps, vb.pp("LayerNorm"))?; Ok(Self { dense, layer_norm }) } fn forward(&self, xs: &Tensor, input_tensor: &Tensor) -> Result<Tensor> { (xs.apply(&self.dense) + input_tensor)?.apply(&self.layer_norm) } } #[derive(Debug, Clone)] struct TextAttention { self_: TextSelfAttention, output: TextSelfOutput, } impl TextAttention { fn new(cfg: &Config, is_cross_attention: bool, vb: VarBuilder) -> Result<Self> { let self_ = TextSelfAttention::new(cfg, is_cross_attention, vb.pp("self"))?; let output = TextSelfOutput::new(cfg, vb.pp("output"))?; Ok(Self { self_, output }) } fn reset_kv_cache(&mut self) { self.self_.reset_kv_cache() } fn forward( &mut self, xs: &Tensor, encoder_hidden_states: Option<&Tensor>, attention_mask: Option<&Tensor>, ) -> Result<Tensor> { let self_outputs = self .self_ .forward(xs, encoder_hidden_states, attention_mask)?; self.output.forward(&self_outputs, xs) } } #[derive(Debug, Clone)] struct TextIntermediate { dense: Linear, intermediate_act_fn: candle_nn::Activation, } impl TextIntermediate { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let dense = linear(cfg.hidden_size, cfg.intermediate_size, vb.pp("dense"))?; Ok(Self { dense, intermediate_act_fn: cfg.hidden_act, }) } } impl Module for TextIntermediate { fn forward(&self, xs: &Tensor) -> Result<Tensor> { xs.apply(&self.dense)?.apply(&self.intermediate_act_fn) } } #[derive(Debug, Clone)] struct TextOutput { dense: Linear, layer_norm: LayerNorm, } impl TextOutput { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let dense = linear(cfg.intermediate_size, cfg.hidden_size, vb.pp("dense"))?; let layer_norm = layer_norm(cfg.hidden_size, cfg.layer_norm_eps, vb.pp("LayerNorm"))?; Ok(Self { dense, layer_norm }) } fn forward(&self, xs: &Tensor, input_tensor: &Tensor) -> Result<Tensor> { 
(xs.apply(&self.dense)? + input_tensor)?.apply(&self.layer_norm) } } #[derive(Debug, Clone)] struct TextLayer { attention: TextAttention, cross_attention: Option<TextAttention>, intermediate: TextIntermediate, output: TextOutput, } impl TextLayer { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let attention = TextAttention::new(cfg, false, vb.pp("attention"))?; let cross_attention = if cfg.is_decoder { Some(TextAttention::new(cfg, true, vb.pp("crossattention"))?) } else { None }; let intermediate = TextIntermediate::new(cfg, vb.pp("intermediate"))?; let output = TextOutput::new(cfg, vb.pp("output"))?; Ok(Self { attention, cross_attention, intermediate, output, }) } fn reset_kv_cache(&mut self) { self.attention.reset_kv_cache(); if let Some(ca) = &mut self.cross_attention { ca.reset_kv_cache() } } fn forward( &mut self, xs: &Tensor, encoder_hidden_states: &Tensor, attention_mask: &Tensor, ) -> Result<Tensor> { let attention_output = self.attention.forward(xs, None, Some(attention_mask))?; let attention_output = match &mut self.cross_attention { Some(ca) => ca.forward(&attention_output, Some(encoder_hidden_states), None)?, None => candle::bail!("expected some cross-attn"), }; let intermediate_output = self.intermediate.forward(&attention_output)?; self.output.forward(&intermediate_output, &attention_output) } } #[derive(Debug, Clone)] struct TextEncoder { layers: Vec<TextLayer>, } impl TextEncoder { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let vb = vb.pp("layer"); let mut layers = Vec::with_capacity(cfg.num_hidden_layers); for i in 0..cfg.num_hidden_layers { let layer = TextLayer::new(cfg, vb.pp(i))?; layers.push(layer) } Ok(Self { layers }) } fn reset_kv_cache(&mut self) { self.layers.iter_mut().for_each(|l| l.reset_kv_cache()) } fn forward( &mut self, xs: &Tensor, encoder_hidden_states: &Tensor, attention_mask: &Tensor, ) -> Result<Tensor> { let mut xs = xs.clone(); for layer in self.layers.iter_mut() { xs = layer.forward(&xs, encoder_hidden_states, attention_mask)? } Ok(xs) } } #[derive(Debug, Clone)] pub struct TextPooler { dense: Linear, } impl TextPooler { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let dense = linear(cfg.hidden_size, cfg.hidden_size, vb.pp("dense"))?; Ok(Self { dense }) } } impl Module for TextPooler { fn forward(&self, xs: &Tensor) -> Result<Tensor> { xs.narrow(D::Minus1, 0, 1)? .squeeze(D::Minus1)? .apply(&self.dense)? .tanh() } } #[derive(Debug, Clone)] struct TextPredictionHeadTransform { dense: Linear, transform_act_fn: candle_nn::Activation, layer_norm: LayerNorm, } impl TextPredictionHeadTransform { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let dense = linear(cfg.hidden_size, cfg.hidden_size, vb.pp("dense"))?; let layer_norm = layer_norm(cfg.hidden_size, cfg.layer_norm_eps, vb.pp("LayerNorm"))?; Ok(Self { dense, transform_act_fn: cfg.hidden_act, layer_norm, }) } } impl Module for TextPredictionHeadTransform { fn forward(&self, xs: &Tensor) -> Result<Tensor> { xs.apply(&self.dense)? .apply(&self.transform_act_fn)? 
.apply(&self.layer_norm) } } #[derive(Debug, Clone)] struct TextLMPredictionHead { transform: TextPredictionHeadTransform, decoder: Linear, } impl TextLMPredictionHead { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let transform = TextPredictionHeadTransform::new(cfg, vb.pp("transform"))?; let weight = vb.get((cfg.vocab_size, cfg.hidden_size), "decoder.weight")?; let bias = vb.get(cfg.vocab_size, "bias")?; let decoder = Linear::from_weights(weight, Some(bias)); Ok(Self { transform, decoder }) } } impl Module for TextLMPredictionHead { fn forward(&self, xs: &Tensor) -> Result<Tensor> { xs.apply(&self.transform)?.apply(&self.decoder) } } #[derive(Debug, Clone)] struct TextOnlyMLMHead { predictions: TextLMPredictionHead, } impl TextOnlyMLMHead { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let predictions = TextLMPredictionHead::new(cfg, vb.pp("predictions"))?; Ok(Self { predictions }) } } impl Module for TextOnlyMLMHead { fn forward(&self, xs: &Tensor) -> Result<Tensor> { self.predictions.forward(xs) } } #[derive(Debug, Clone)] struct TextModel { embeddings: TextEmbeddings, encoder: TextEncoder, past_kv_len: usize, // We do not need the pooler for caption generation } impl TextModel { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let embeddings = TextEmbeddings::new(cfg, vb.pp("embeddings"))?; let encoder = TextEncoder::new(cfg, vb.pp("encoder"))?; Ok(Self { embeddings, encoder, past_kv_len: 0, }) } fn forward( &mut self, input_ids: &Tensor, encoder_hidden_states: &Tensor, attention_mask: &Tensor, ) -> Result<Tensor> { let (_b_sz, seq_len) = input_ids.dims2()?; let embedding_output = self.embeddings.forward(input_ids, self.past_kv_len)?; let sequence_output = self.encoder .forward(&embedding_output, encoder_hidden_states, attention_mask)?; self.past_kv_len += seq_len; // We're interested in the sequence-output rather than the pooled-output. Ok(sequence_output) } fn reset_kv_cache(&mut self) { self.past_kv_len = 0; self.encoder.reset_kv_cache(); } } #[derive(Debug, Clone)] pub struct TextLMHeadModel { bert: TextModel, cls: TextOnlyMLMHead, } impl TextLMHeadModel { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let bert = TextModel::new(cfg, vb.pp("bert"))?; let cls = TextOnlyMLMHead::new(cfg, vb.pp("cls"))?; Ok(Self { bert, cls }) } pub fn forward( &mut self, input_ids: &Tensor, encoder_hidden_states: &Tensor, ) -> Result<Tensor> { let seq_len = input_ids.dim(1)?; let mask: Vec<_> = (0..seq_len) .flat_map(|i| (0..seq_len).map(move |j| if j > i { f32::NEG_INFINITY } else { 0f32 })) .collect(); let mask = Tensor::from_vec(mask, (seq_len, seq_len), input_ids.device())?; let sequence_output = self.bert.forward(input_ids, encoder_hidden_states, &mask)?; let prediction_scores = self.cls.forward(&sequence_output)?; // return_logits is false so we don't discard the last sequence element. Ok(prediction_scores) } pub fn reset_kv_cache(&mut self) { self.bert.reset_kv_cache() } }
candle/candle-transformers/src/models/blip_text.rs/0
{ "file_path": "candle/candle-transformers/src/models/blip_text.rs", "repo_id": "candle", "token_count": 7345 }
//! Implementation of the DINOv2 revision (4 regularization) //! //! The DINOv2-reg4 model is a variant of DINOv2 that adds 4 regularization tokens to the //! original architecture. This implementation is specifically trained for plant species //! classification on the PlantCLEF2024 dataset with 7,806 classes. //! //! - [Paper](https://arxiv.org/abs/2309.16588). DINOv2: Learning Robust Visual Features without Supervision //! - [GH Repo](https://github.com/facebookresearch/dinov2) //! //! # Example //! //! ```bash //! # Download classes names and a plant picture to identify //! # see candle/examples/dinov2reg4 for full code. //! //! # Perform inference //! cargo run \ //! --example dinov2reg4 \ //! --release -- \ //! --image <orchid-file> //! //! > Orchis simia Lam. : 45.55% //! > Orchis × bergonii Nanteuil: 9.80% //! > Orchis italica Poir. : 9.66% //! > Orchis × angusticruris Franch.: 2.76% //! > Orchis × bivonae Tod. : 2.54% //! ``` //! //! <div align=center> //! <img src="https://bs.plantnet.org/image/o/bd2d3830ac3270218ba82fd24e2290becd01317c" alt="" width=320> //! </div> //! use candle::{IndexOp, Result, Tensor, D}; use candle_nn::{layer_norm, LayerNorm, Linear, Module, VarBuilder}; const IMG_SIZE: usize = 518; const PATCH_SIZE: usize = 14; const NUM_CLASSES: usize = 7806; // PlantCLEF2024 DINOv2 (https://zenodo.org/records/10848263) fn linear(vb: VarBuilder, in_dim: usize, out_dim: usize, bias: bool) -> Result<Linear> { if bias { candle_nn::linear(in_dim, out_dim, vb) } else { candle_nn::linear_no_bias(in_dim, out_dim, vb) } } #[derive(Debug)] struct Attention { qkv: Linear, proj: Linear, num_heads: usize, scale: f64, } impl Attention { fn new( vb: VarBuilder, dim: usize, num_heads: usize, qkv_bias: bool, proj_bias: bool, ) -> Result<Self> { let qkv = linear(vb.pp("qkv"), dim, dim * 3, qkv_bias)?; let proj = linear(vb.pp("proj"), dim, dim, proj_bias)?; let scale = 1. / ((dim / num_heads) as f64).sqrt(); Ok(Self { qkv, proj, num_heads, scale, }) } } impl Module for Attention { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let (b, n, c) = xs.dims3()?; let qkv = self .qkv .forward(xs)? .reshape((b, n, 3, self.num_heads, c / self.num_heads))? .transpose(1, 2)? // 02134 .transpose(0, 1)? // 20134 .transpose(2, 3)?; // 20314 let q = (qkv.i(0)? 
* self.scale)?; let k = qkv.i(1)?.contiguous()?; let v = qkv.i(2)?.contiguous()?; let attn = candle_nn::ops::softmax(&q.matmul(&k.t()?)?, D::Minus1)?; let attn = attn.matmul(&v)?.transpose(1, 2)?.reshape((b, n, c))?; self.proj.forward(&attn) } } #[derive(Debug)] struct LayerScale { gamma: Tensor, } impl LayerScale { fn new(vb: VarBuilder, dim: usize) -> Result<Self> { let gamma = vb.get(dim, "gamma")?; Ok(Self { gamma }) } } impl Module for LayerScale { fn forward(&self, xs: &Tensor) -> Result<Tensor> { xs.broadcast_mul(&self.gamma) } } #[derive(Debug)] struct Mlp { fc1: Linear, fc2: Linear, } impl Mlp { fn new(vb: VarBuilder, in_features: usize, hidden_features: usize, bias: bool) -> Result<Self> { let out_features = in_features; let fc1 = linear(vb.pp("fc1"), in_features, hidden_features, bias)?; let fc2 = linear(vb.pp("fc2"), hidden_features, out_features, bias)?; Ok(Self { fc1, fc2 }) } } impl Module for Mlp { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let xs = self.fc1.forward(xs)?.gelu()?; self.fc2.forward(&xs) } } #[derive(Debug)] struct Block { norm1: LayerNorm, attn: Attention, ls1: LayerScale, norm2: LayerNorm, mlp: Mlp, ls2: LayerScale, } impl Block { fn new(vb: VarBuilder, dim: usize, num_heads: usize) -> Result<Self> { let norm1 = layer_norm(dim, 1e-6, vb.pp("norm1"))?; let attn = Attention::new(vb.pp("attn"), dim, num_heads, true, true)?; let ls1 = LayerScale::new(vb.pp("ls1"), dim)?; let norm2 = layer_norm(dim, 1e-6, vb.pp("norm2"))?; let mlp = Mlp::new(vb.pp("mlp"), dim, dim * 4, true)?; let ls2 = LayerScale::new(vb.pp("ls2"), dim)?; Ok(Self { norm1, attn, ls1, norm2, mlp, ls2, }) } } impl Module for Block { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let residual = xs; let xs = self .ls1 .forward(&self.attn.forward(&self.norm1.forward(xs)?)?)?; let xs = (xs + residual)?; let residual = &xs; let xs = self .ls2 .forward(&self.mlp.forward(&self.norm2.forward(&xs)?)?)?; xs + residual } } #[derive(Debug)] struct PatchEmbed { proj: candle_nn::Conv2d, patch_size: (usize, usize), num_patches: usize, } impl PatchEmbed { fn new( vb: VarBuilder, img_size: usize, patch_size: usize, in_chans: usize, embed_dim: usize, ) -> Result<Self> { let config = candle_nn::Conv2dConfig { stride: patch_size, ..Default::default() }; let proj = candle_nn::conv2d(in_chans, embed_dim, patch_size, config, vb.pp("proj"))?; let num_patches = (img_size / patch_size) * (img_size / patch_size); Ok(Self { proj, patch_size: (patch_size, patch_size), num_patches, }) } } impl Module for PatchEmbed { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let (_b, _c, h, w) = xs.dims4()?; let (patch_h, patch_w) = self.patch_size; if (h % patch_h) != 0 { candle::bail!("image height {h} is not a multiple of patch height {patch_h}") } if (w % patch_w) != 0 { candle::bail!("image width {w} is not a multiple of patch width {patch_w}") } let xs = self.proj.forward(xs)?; let (b, c, h, w) = xs.dims4()?; // flatten embeddings. 
xs.reshape((b, c, h * w))?.transpose(1, 2) } } #[derive(Debug)] pub struct DinoVisionTransformer { patch_embed: PatchEmbed, cls_token: Tensor, reg_token: Tensor, pos_embed: Tensor, blocks: Vec<Block>, norm: LayerNorm, head: Linear, } impl DinoVisionTransformer { pub fn new(vb: VarBuilder, depth: usize, embed_dim: usize, num_heads: usize) -> Result<Self> { let patch_embed = PatchEmbed::new(vb.pp("patch_embed"), IMG_SIZE, PATCH_SIZE, 3, embed_dim)?; let cls_token = vb.get((1, 1, embed_dim), "cls_token")?; let reg_token = vb.get((1, 4, embed_dim), "reg_token")?; let pos_embed = vb.get((1, patch_embed.num_patches, embed_dim), "pos_embed")?; let head = linear(vb.pp("head"), embed_dim, NUM_CLASSES, true)?; let norm = layer_norm(embed_dim, 1e-6, vb.pp("norm"))?; let vb_b = vb.pp("blocks"); let blocks = (0..depth) .map(|i| Block::new(vb_b.pp(i.to_string()), embed_dim, num_heads)) .collect::<Result<Vec<_>>>()?; Ok(Self { patch_embed, cls_token, reg_token, pos_embed, blocks, norm, head, }) } fn interpolate_pos_encoding(&self, xs: &Tensor, w: usize, h: usize) -> Result<Tensor> { let npatch = xs.dim(1)? - 1; let n = self.pos_embed.dim(1)? - 1; let sqrt_n = (n as f64).sqrt(); if npatch == n && w == h { return Ok(self.pos_embed.clone()); } let patch_pos_embed = &self.pos_embed; let dim = xs.dim(D::Minus1)?; let (w0, h0) = ((w / PATCH_SIZE) as f64 + 0.1, (h / PATCH_SIZE) as f64 + 0.1); let patch_pos_embed = patch_pos_embed .reshape((1, sqrt_n as usize, sqrt_n as usize, dim))? .transpose(2, 3)? .transpose(1, 2)?; // This uses bicubic interpolation in the original implementation. let patch_pos_embed = patch_pos_embed.upsample_nearest2d(h0 as usize, w0 as usize)?; let el_count = patch_pos_embed.shape().elem_count(); patch_pos_embed .transpose(1, 2)? .transpose(2, 3)? .reshape((1, el_count / dim, dim)) } fn prepare_tokens_with_mask(&self, xs: &Tensor) -> Result<Tensor> { let (_b, _nc, w, h) = xs.dims4()?; if (w != IMG_SIZE) || (h != IMG_SIZE) { panic!("Error: The input tensor should have the shape: Bx3x518x518."); } let xs = self.patch_embed.forward(xs)?; let xs = (&xs + &self.interpolate_pos_encoding(&xs, w, h)?)?; let xs = Tensor::cat(&[&self.cls_token, &self.reg_token, &xs], 1)?; Ok(xs) } } impl Module for DinoVisionTransformer { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let mut xs = self.prepare_tokens_with_mask(xs)?; for blk in self.blocks.iter() { xs = blk.forward(&xs)? } let xs = self.norm.forward(&xs)?; let xs_norm_clstoken = xs.i((.., 0))?; self.head.forward(&xs_norm_clstoken) } } pub fn vit_small(vb: VarBuilder) -> Result<DinoVisionTransformer> { DinoVisionTransformer::new(vb, 12, 384, 6) } pub fn vit_base(vb: VarBuilder) -> Result<DinoVisionTransformer> { DinoVisionTransformer::new(vb, 12, 768, 12) }
candle/candle-transformers/src/models/dinov2reg4.rs/0
{ "file_path": "candle/candle-transformers/src/models/dinov2reg4.rs", "repo_id": "candle", "token_count": 4809 }
//! Granite is a Long Context Transformer Language Model. //! //! A high performance transformer model optimized for efficient processing //! of very long context sequences //! //! Based on implementation from [Nod.ai](https://github.com/nod-ai/granite) use super::with_tracing::{linear_no_bias as linear, Linear, RmsNorm}; use candle::{DType, Device, IndexOp, Result, Tensor, D}; use candle_nn::{embedding, Embedding, Module, VarBuilder}; use std::{collections::HashMap, f32::consts::PI}; pub const DEFAULT_MAX_SEQ_LEN: usize = 4096; #[derive(Debug, Clone, serde::Deserialize, Default)] pub enum GraniteRopeType { #[serde(rename = "granite")] Granite, #[default] #[serde(rename = "default")] Default, } #[derive(Debug, Clone, serde::Deserialize, Default)] pub struct GraniteRopeConfig { pub factor: f32, pub low_freq_factor: f32, pub high_freq_factor: f32, pub original_max_position_embeddings: usize, pub rope_type: GraniteRopeType, } #[derive(Debug, Clone, serde::Deserialize)] #[serde(untagged)] pub enum GraniteEosToks { Single(u32), Multiple(Vec<u32>), } #[derive(Debug, Clone, serde::Deserialize)] pub struct GraniteConfig { pub hidden_size: usize, pub intermediate_size: usize, pub vocab_size: usize, pub num_hidden_layers: usize, pub num_attention_heads: usize, pub num_key_value_heads: Option<usize>, pub rms_norm_eps: f64, #[serde(default = "default_rope")] pub rope_theta: f32, pub bos_token_id: Option<u32>, pub eos_token_id: Option<GraniteEosToks>, pub rope_scaling: Option<GraniteRopeConfig>, pub max_position_embeddings: usize, } impl GraniteConfig { pub fn num_key_value_heads(&self) -> usize { self.num_key_value_heads.unwrap_or(self.num_attention_heads) } } fn default_rope() -> f32 { 10_000.0 } impl GraniteConfig { pub fn into_config(self, use_flash_attn: bool) -> Config { Config { hidden_size: self.hidden_size, intermediate_size: self.intermediate_size, vocab_size: self.vocab_size, num_hidden_layers: self.num_hidden_layers, num_attention_heads: self.num_attention_heads, num_key_value_heads: self.num_key_value_heads(), rms_norm_eps: self.rms_norm_eps, rope_theta: self.rope_theta, use_flash_attn, bos_token_id: self.bos_token_id, eos_token_id: self.eos_token_id, rope_scaling: self.rope_scaling, max_position_embeddings: self.max_position_embeddings, } } } #[derive(Debug, Clone)] pub struct Config { pub hidden_size: usize, pub intermediate_size: usize, pub vocab_size: usize, pub num_hidden_layers: usize, pub num_attention_heads: usize, pub num_key_value_heads: usize, pub use_flash_attn: bool, pub rms_norm_eps: f64, pub rope_theta: f32, pub bos_token_id: Option<u32>, pub eos_token_id: Option<GraniteEosToks>, pub rope_scaling: Option<GraniteRopeConfig>, pub max_position_embeddings: usize, } #[derive(Debug, Clone)] pub struct Cache { masks: HashMap<usize, Tensor>, pub use_kv_cache: bool, kvs: Vec<Option<(Tensor, Tensor)>>, cos: Tensor, sin: Tensor, device: Device, } fn calculate_default_inv_freq(cfg: &Config) -> Vec<f32> { let head_dim = cfg.hidden_size / cfg.num_attention_heads; (0..head_dim) .step_by(2) .map(|i| 1f32 / cfg.rope_theta.powf(i as f32 / head_dim as f32)) .collect() } impl Cache { pub fn new(use_kv_cache: bool, dtype: DType, config: &Config, device: &Device) -> Result<Self> { // precompute freqs_cis let theta = match &config.rope_scaling { None | Some(GraniteRopeConfig { rope_type: GraniteRopeType::Default, .. 
}) => calculate_default_inv_freq(config), Some(rope_scaling) => { let low_freq_wavelen = rope_scaling.original_max_position_embeddings as f32 / rope_scaling.low_freq_factor; let high_freq_wavelen = rope_scaling.original_max_position_embeddings as f32 / rope_scaling.high_freq_factor; calculate_default_inv_freq(config) .into_iter() .map(|freq| { let wavelen = 2. * PI / freq; if wavelen < high_freq_wavelen { freq } else if wavelen > low_freq_wavelen { freq / rope_scaling.factor } else { let smooth = (rope_scaling.original_max_position_embeddings as f32 / wavelen - rope_scaling.low_freq_factor) / (rope_scaling.high_freq_factor - rope_scaling.low_freq_factor); (1. - smooth) * freq / rope_scaling.factor + smooth * freq } }) .collect::<Vec<_>>() } }; let theta = Tensor::new(theta, device)?; let idx_theta = Tensor::arange(0, config.max_position_embeddings as u32, device)? .to_dtype(DType::F32)? .reshape((config.max_position_embeddings, 1))? .matmul(&theta.reshape((1, theta.elem_count()))?)?; let cos = idx_theta.cos()?.to_dtype(dtype)?; let sin = idx_theta.sin()?.to_dtype(dtype)?; Ok(Self { masks: HashMap::new(), use_kv_cache, kvs: vec![None; config.num_hidden_layers], device: device.clone(), cos, sin, }) } fn mask(&mut self, t: usize) -> Result<Tensor> { if let Some(mask) = self.masks.get(&t) { Ok(mask.clone()) } else { let mask: Vec<_> = (0..t) .flat_map(|i| (0..t).map(move |j| u8::from(j > i))) .collect(); let mask = Tensor::from_slice(&mask, (t, t), &self.device)?; self.masks.insert(t, mask.clone()); Ok(mask) } } } #[derive(Debug, Clone)] struct CausalSelfAttention { q_proj: Linear, k_proj: Linear, v_proj: Linear, o_proj: Linear, num_attention_heads: usize, num_key_value_heads: usize, head_dim: usize, use_flash_attn: bool, span: tracing::Span, span_rot: tracing::Span, max_position_embeddings: usize, } #[cfg(feature = "flash-attn")] fn flash_attn( q: &Tensor, k: &Tensor, v: &Tensor, softmax_scale: f32, causal: bool, ) -> Result<Tensor> { candle_flash_attn::flash_attn(q, k, v, softmax_scale, causal) } #[cfg(not(feature = "flash-attn"))] fn flash_attn(_: &Tensor, _: &Tensor, _: &Tensor, _: f32, _: bool) -> Result<Tensor> { unimplemented!("compile with '--features flash-attn'") } impl CausalSelfAttention { fn apply_rotary_emb(&self, x: &Tensor, index_pos: usize, cache: &Cache) -> Result<Tensor> { let _enter = self.span_rot.enter(); let (_b_sz, _, seq_len, _hidden_size) = x.dims4()?; let cos = cache.cos.narrow(0, index_pos, seq_len)?; let sin = cache.sin.narrow(0, index_pos, seq_len)?; candle_nn::rotary_emb::rope(x, &cos, &sin) } fn forward( &self, x: &Tensor, index_pos: usize, block_idx: usize, cache: &mut Cache, ) -> Result<Tensor> { let _enter = self.span.enter(); let (b_sz, seq_len, hidden_size) = x.dims3()?; let q = self.q_proj.forward(x)?; let k = self.k_proj.forward(x)?; let v = self.v_proj.forward(x)?; let q = q .reshape((b_sz, seq_len, self.num_attention_heads, self.head_dim))? .transpose(1, 2)? .contiguous()?; let k = k .reshape((b_sz, seq_len, self.num_key_value_heads, self.head_dim))? .transpose(1, 2)? .contiguous()?; let mut v = v .reshape((b_sz, seq_len, self.num_key_value_heads, self.head_dim))? 
.transpose(1, 2)?; let q = self.apply_rotary_emb(&q, index_pos, cache)?; let mut k = self.apply_rotary_emb(&k, index_pos, cache)?; if cache.use_kv_cache { if let Some((cache_k, cache_v)) = &cache.kvs[block_idx] { k = Tensor::cat(&[cache_k, &k], 2)?.contiguous()?; v = Tensor::cat(&[cache_v, &v], 2)?.contiguous()?; let k_seq_len = k.dims()[1]; if k_seq_len > self.max_position_embeddings { k = k .narrow( D::Minus1, k_seq_len - self.max_position_embeddings, self.max_position_embeddings, )? .contiguous()? } let v_seq_len = v.dims()[1]; if v_seq_len > 2 * self.max_position_embeddings { v = v .narrow( D::Minus1, v_seq_len - self.max_position_embeddings, self.max_position_embeddings, )? .contiguous()? } } cache.kvs[block_idx] = Some((k.clone(), v.clone())) } let k = self.repeat_kv(k)?; let v = self.repeat_kv(v)?; let y = if self.use_flash_attn { // flash-attn expects (b_sz, seq_len, nheads, head_dim) let q = q.transpose(1, 2)?; let k = k.transpose(1, 2)?; let v = v.transpose(1, 2)?; let softmax_scale = 1f32 / (self.head_dim as f32).sqrt(); flash_attn(&q, &k, &v, softmax_scale, seq_len > 1)?.transpose(1, 2)? } else { let in_dtype = q.dtype(); let q = q.to_dtype(DType::F32)?; let k = k.to_dtype(DType::F32)?; let v = v.to_dtype(DType::F32)?; let att = (q.matmul(&k.t()?)? / (self.head_dim as f64).sqrt())?; let att = if seq_len == 1 { att } else { let mask = cache.mask(seq_len)?.broadcast_as(att.shape())?; masked_fill(&att, &mask, f32::NEG_INFINITY)? }; let att = candle_nn::ops::softmax(&att, D::Minus1)?; // Convert to contiguous as matmul doesn't support strided vs for now. att.matmul(&v.contiguous()?)?.to_dtype(in_dtype)? }; let y = y.transpose(1, 2)?.reshape(&[b_sz, seq_len, hidden_size])?; let y = self.o_proj.forward(&y)?; Ok(y) } fn repeat_kv(&self, x: Tensor) -> Result<Tensor> { crate::utils::repeat_kv(x, self.num_attention_heads / self.num_key_value_heads) } fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { let span = tracing::span!(tracing::Level::TRACE, "attn"); let span_rot = tracing::span!(tracing::Level::TRACE, "attn-rot"); let size_in = cfg.hidden_size; let size_q = (cfg.hidden_size / cfg.num_attention_heads) * cfg.num_attention_heads; let size_kv = (cfg.hidden_size / cfg.num_attention_heads) * cfg.num_key_value_heads; let q_proj = linear(size_in, size_q, vb.pp("q_proj"))?; let k_proj = linear(size_in, size_kv, vb.pp("k_proj"))?; let v_proj = linear(size_in, size_kv, vb.pp("v_proj"))?; let o_proj = linear(size_q, size_in, vb.pp("o_proj"))?; Ok(Self { q_proj, k_proj, v_proj, o_proj, num_attention_heads: cfg.num_attention_heads, num_key_value_heads: cfg.num_key_value_heads, head_dim: cfg.hidden_size / cfg.num_attention_heads, use_flash_attn: cfg.use_flash_attn, span, span_rot, max_position_embeddings: cfg.max_position_embeddings, }) } } fn masked_fill(on_false: &Tensor, mask: &Tensor, on_true: f32) -> Result<Tensor> { let shape = mask.shape(); let on_true = Tensor::new(on_true, on_false.device())?.broadcast_as(shape.dims())?; let m = mask.where_cond(&on_true, on_false)?; Ok(m) } #[derive(Debug, Clone)] struct Mlp { c_fc1: Linear, c_fc2: Linear, c_proj: Linear, span: tracing::Span, } impl Mlp { fn forward(&self, x: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let x = (candle_nn::ops::silu(&self.c_fc1.forward(x)?)? 
* self.c_fc2.forward(x)?)?; self.c_proj.forward(&x) } fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { let span = tracing::span!(tracing::Level::TRACE, "mlp"); let h_size = cfg.hidden_size; let i_size = cfg.intermediate_size; let c_fc1 = linear(h_size, i_size, vb.pp("gate_proj"))?; let c_fc2 = linear(h_size, i_size, vb.pp("up_proj"))?; let c_proj = linear(i_size, h_size, vb.pp("down_proj"))?; Ok(Self { c_fc1, c_fc2, c_proj, span, }) } } #[derive(Debug, Clone)] struct Block { rms_1: RmsNorm, attn: CausalSelfAttention, rms_2: RmsNorm, mlp: Mlp, span: tracing::Span, } impl Block { fn forward( &self, x: &Tensor, index_pos: usize, block_idx: usize, cache: &mut Cache, ) -> Result<Tensor> { let _enter = self.span.enter(); let residual = x; let x = self.rms_1.forward(x)?; let x = (self.attn.forward(&x, index_pos, block_idx, cache)? + residual)?; let residual = &x; let x = (self.mlp.forward(&self.rms_2.forward(&x)?)? + residual)?; Ok(x) } fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { let span = tracing::span!(tracing::Level::TRACE, "block"); let attn = CausalSelfAttention::load(vb.pp("self_attn"), cfg)?; let mlp = Mlp::load(vb.pp("mlp"), cfg)?; let rms_1 = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("input_layernorm"))?; let rms_2 = RmsNorm::new( cfg.hidden_size, cfg.rms_norm_eps, vb.pp("post_attention_layernorm"), )?; Ok(Self { rms_1, attn, rms_2, mlp, span, }) } } #[derive(Debug, Clone)] pub struct Granite { wte: Embedding, blocks: Vec<Block>, ln_f: RmsNorm, lm_head: Linear, } impl Granite { pub fn forward(&self, x: &Tensor, index_pos: usize, cache: &mut Cache) -> Result<Tensor> { let (_b_sz, seq_len) = x.dims2()?; let mut x = self.wte.forward(x)?; for (block_idx, block) in self.blocks.iter().enumerate() { x = block.forward(&x, index_pos, block_idx, cache)?; } let x = self.ln_f.forward(&x)?; let x = x.i((.., seq_len - 1, ..))?.contiguous()?; let logits = self.lm_head.forward(&x)?; logits.to_dtype(DType::F32) } pub fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { let wte = embedding(cfg.vocab_size, cfg.hidden_size, vb.pp("model.embed_tokens"))?; let lm_head = linear(cfg.hidden_size, cfg.vocab_size, vb.pp("lm_head"))?; let ln_f = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("model.norm"))?; let blocks: Vec<_> = (0..cfg.num_hidden_layers) .map(|i| Block::load(vb.pp(format!("model.layers.{i}")), cfg).unwrap()) .collect(); Ok(Self { wte, blocks, ln_f, lm_head, }) } }
candle/candle-transformers/src/models/granite.rs/0
{ "file_path": "candle/candle-transformers/src/models/granite.rs", "repo_id": "candle", "token_count": 8417 }
// Copyright (c) Kyutai, all rights reserved. // This source code is licensed under the license found in the // LICENSE file in the root directory of this source tree. use candle::{IndexOp, Layout, Result, Shape, Tensor, D}; use candle_nn::{linear, Linear, VarBuilder}; struct CodebookEncode; impl candle::CustomOp2 for CodebookEncode { fn name(&self) -> &'static str { "cb" } fn cpu_fwd( &self, lhs_storage: &candle::CpuStorage, lhs_layout: &Layout, rhs_storage: &candle::CpuStorage, rhs_layout: &Layout, ) -> Result<(candle::CpuStorage, Shape)> { use rayon::prelude::*; let (lhs_dim1, lhs_dim2) = lhs_layout.shape().dims2()?; let (rhs_dim1, rhs_dim2) = rhs_layout.shape().dims2()?; if lhs_dim2 != rhs_dim2 { candle::bail!("CodebookEncode, mismatch on last dim, {lhs_layout:?} {rhs_layout:?}"); } if lhs_dim2 == 0 { candle::bail!("CodebookEncode, empty last dim {lhs_layout:?}") } let lhs = match lhs_layout.contiguous_offsets() { None => candle::bail!("CodebookEncode, lhs has to be contiguous, got {lhs_layout:?}"), Some((o1, o2)) => { let slice = lhs_storage.as_slice::<f32>()?; &slice[o1..o2] } }; let rhs = match rhs_layout.contiguous_offsets() { None => candle::bail!("CodebookEncode, rhs has to be contiguous, got {rhs_layout:?}"), Some((o1, o2)) => { let slice = rhs_storage.as_slice::<f32>()?; &slice[o1..o2] } }; let dst = (0..lhs_dim1) .into_par_iter() .map(|idx1| { let mut where_min = 0; let mut min_dist = f32::INFINITY; let lhs = &lhs[idx1 * lhs_dim2..(idx1 + 1) * lhs_dim2]; for idx2 in 0..rhs_dim1 { let rhs = &rhs[idx2 * rhs_dim2..(idx2 + 1) * rhs_dim2]; let mut dist = 0f32; for (a, b) in lhs.iter().zip(rhs.iter()) { dist += (a - b) * (a - b) } if dist < min_dist { min_dist = dist; where_min = idx2; } } where_min as u32 }) .collect(); let storage = candle::WithDType::to_cpu_storage_owned(dst); Ok((storage, (lhs_dim1,).into())) } } #[allow(unused)] #[derive(Debug, Clone)] pub struct EuclideanCodebook { initialized: Tensor, cluster_usage: Tensor, embedding_sum: Tensor, embedding: Tensor, c2: Tensor, epsilon: f64, dim: usize, span_encode: tracing::Span, span_decode: tracing::Span, } impl EuclideanCodebook { pub fn new(dim: usize, codebook_size: usize, vb: VarBuilder) -> Result<Self> { let epsilon = 1e-5; let initialized = vb.get(1, "initialized")?; let cluster_usage = vb.get(codebook_size, "cluster_usage")?; let embedding_sum = vb.get((codebook_size, dim), "embed_sum")?; let embedding = { let cluster_usage = cluster_usage.maximum(epsilon)?.unsqueeze(1)?; embedding_sum.broadcast_div(&cluster_usage)? }; let c2 = ((&embedding * &embedding)?.sum(D::Minus1)? / 2.0)?; Ok(Self { initialized, cluster_usage, embedding_sum, embedding, c2, epsilon, dim, span_encode: tracing::span!(tracing::Level::TRACE, "euclidean-encode"), span_decode: tracing::span!(tracing::Level::TRACE, "euclidean-encode"), }) } pub fn encode_very_slow(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span_encode.enter(); let mut target_shape = xs.dims().to_vec(); target_shape.pop(); let xs = xs.flatten_to(D::Minus2)?; let _ = xs.dims2()?; // TODO: avoid repeating this. let cluster_usage = self.cluster_usage.maximum(self.epsilon)?.unsqueeze(1)?; let embedding = self.embedding_sum.broadcast_div(&cluster_usage)?; // Manual cdist implementation. 
let diff = xs.unsqueeze(1)?.broadcast_sub(&embedding.unsqueeze(0)?)?; let dists = diff.sqr()?.sum(D::Minus1)?; let codes = dists.argmin(D::Minus1)?; codes.reshape(target_shape) } pub fn encode_slow(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span_encode.enter(); let mut target_shape = xs.dims().to_vec(); target_shape.pop(); let xs = xs.flatten_to(D::Minus2)?; let _ = xs.dims2()?; let dot_prod = xs.matmul(&self.embedding.t()?)?; let codes = self.c2.broadcast_sub(&dot_prod)?.argmin(D::Minus1)?; codes.reshape(target_shape) } pub fn encode(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span_encode.enter(); let mut target_shape = xs.dims().to_vec(); target_shape.pop(); let xs = xs.flatten_to(D::Minus2)?; let _ = xs.dims2()?; let codes = Tensor::apply_op2(&xs, &self.embedding, CodebookEncode)?; codes.reshape(target_shape) } pub fn decode(&self, indexes: &Tensor) -> Result<Tensor> { let _enter = self.span_decode.enter(); // let ys = candle_nn::Embedding::new(self.embedding.clone(), self.dim).forward(xs)?; let mut final_dims = indexes.dims().to_vec(); final_dims.push(self.dim); let indexes = indexes.flatten_all()?; let values = self.embedding.index_select(&indexes, 0)?; let values = values.reshape(final_dims)?; Ok(values) } } #[allow(unused)] #[derive(Debug, Clone)] pub struct VectorQuantization { project_in: Option<Linear>, project_out: Option<Linear>, codebook: EuclideanCodebook, } impl VectorQuantization { pub fn new( dim: usize, codebook_size: usize, codebook_dim: Option<usize>, vb: VarBuilder, ) -> Result<Self> { let codebook_dim = codebook_dim.unwrap_or(dim); let (project_in, project_out) = if codebook_dim == dim { (None, None) } else { let p_in = linear(dim, codebook_dim, vb.pp("project_in"))?; let p_out = linear(codebook_dim, dim, vb.pp("project_out"))?; (Some(p_in), Some(p_out)) }; let codebook = EuclideanCodebook::new(codebook_dim, codebook_size, vb.pp("codebook"))?; Ok(Self { project_in, project_out, codebook, }) } pub fn encode(&self, xs: &Tensor) -> Result<Tensor> { let xs = xs.t()?.apply(&self.project_in.as_ref())?; self.codebook.encode_slow(&xs) } pub fn decode(&self, codes: &Tensor) -> Result<Tensor> { let quantized = self.codebook.decode(codes)?; let quantized = match &self.project_out { None => quantized, Some(p) => quantized.apply(p)?, }; quantized.t() } } #[derive(Debug, Clone)] pub struct ResidualVectorQuantization { layers: Vec<VectorQuantization>, } impl ResidualVectorQuantization { pub fn new( n_q: usize, dim: usize, codebook_size: usize, codebook_dim: Option<usize>, vb: VarBuilder, ) -> Result<Self> { let vb = vb.pp("layers"); let mut layers = Vec::with_capacity(n_q); for i in 0..n_q { let layer = VectorQuantization::new(dim, codebook_size, codebook_dim, vb.pp(i))?; layers.push(layer) } Ok(Self { layers }) } pub fn encode(&self, xs: &Tensor) -> Result<Tensor> { let mut codes = Vec::with_capacity(self.layers.len()); let mut residual = xs.clone(); for layer in self.layers.iter() { let indices = layer.encode(&residual)?; let quantized = layer.decode(&indices)?; residual = (residual - quantized)?; codes.push(indices) } Tensor::stack(&codes, 0) } pub fn decode(&self, xs: &Tensor) -> Result<Tensor> { if self.layers.is_empty() { candle::bail!("empty layers in ResidualVectorQuantization") } if self.layers.len() != xs.dim(0)? 
{ candle::bail!( "mismatch between the number of layers {} and the code shape {:?}", self.layers.len(), xs.shape() ) } let mut quantized = self.layers[0].decode(&xs.i(0)?)?; for (i, layer) in self.layers.iter().enumerate().skip(1) { let xs = xs.i(i)?; quantized = (quantized + layer.decode(&xs))? } Ok(quantized) } } #[allow(unused)] #[derive(Debug, Clone)] pub struct ResidualVectorQuantizer { vq: ResidualVectorQuantization, input_proj: Option<candle_nn::Conv1d>, output_proj: Option<candle_nn::Conv1d>, } impl ResidualVectorQuantizer { pub fn new( dim: usize, input_dim: Option<usize>, output_dim: Option<usize>, n_q: usize, bins: usize, force_projection: bool, vb: VarBuilder, ) -> Result<Self> { let input_dim = input_dim.unwrap_or(dim); let output_dim = output_dim.unwrap_or(dim); let input_proj = if input_dim == dim && !force_projection { None } else { let c = candle_nn::conv1d_no_bias( input_dim, dim, 1, Default::default(), vb.pp("input_proj"), )?; Some(c) }; let output_proj = if output_dim == dim && !force_projection { None } else { let c = candle_nn::conv1d_no_bias( dim, output_dim, 1, Default::default(), vb.pp("output_proj"), )?; Some(c) }; let vq = ResidualVectorQuantization::new( n_q, dim, /* codebook_size */ bins, /* codebook_dim */ None, vb, )?; Ok(Self { vq, input_proj, output_proj, }) } pub fn encode(&self, xs: &Tensor) -> Result<Tensor> { let codes = self.vq.encode(&xs.apply(&self.input_proj.as_ref())?)?; codes.transpose(0, 1) } pub fn decode(&self, codes: &Tensor) -> Result<Tensor> { // codes is [B, K, T], with T frames, K nb of codebooks, vq.decode expects [K, B, T]. let codes = codes.transpose(0, 1)?; let quantized = self.vq.decode(&codes)?; match &self.output_proj { None => Ok(quantized), Some(p) => quantized.apply(p), } } } // we do not use any codebook_offset at the moment. When reconstructing the codes, we could just // concatenate the indexes. #[derive(Debug, Clone)] pub struct SplitResidualVectorQuantizer { rvq_first: ResidualVectorQuantizer, rvq_rest: ResidualVectorQuantizer, n_q: usize, span_encode: tracing::Span, span_decode: tracing::Span, } impl SplitResidualVectorQuantizer { pub fn new( dim: usize, input_dim: Option<usize>, output_dim: Option<usize>, n_q: usize, bins: usize, vb: VarBuilder, ) -> Result<Self> { let rvq_first = ResidualVectorQuantizer::new( dim, input_dim, output_dim, 1, bins, true, vb.pp("semantic_residual_vector_quantizer"), )?; let rvq_rest = ResidualVectorQuantizer::new( dim, input_dim, output_dim, n_q - 1, bins, true, vb.pp("acoustic_residual_vector_quantizer"), )?; let span_encode = tracing::span!(tracing::Level::TRACE, "split-rvq-encode"); let span_decode = tracing::span!(tracing::Level::TRACE, "split-rvq-decode"); Ok(Self { rvq_first, rvq_rest, n_q, span_encode, span_decode, }) } pub fn encode(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span_encode.enter(); let codes = self.rvq_first.encode(xs)?; if self.n_q > 1 { // We encode xs again here rather than the residual. The decomposition is not // hierarchical but rather having semantic tokens for rvq_first and the acoustic tokens // for rvq_rest. let rest_codes = self.rvq_rest.encode(xs)?; Tensor::cat(&[codes, rest_codes], 1) } else { Ok(codes) } } pub fn decode(&self, codes: &Tensor) -> Result<Tensor> { // codes is [B, K, T], with T frames, K nb of codebooks. let _enter = self.span_decode.enter(); let quantized = self.rvq_first.decode(&codes.i((.., ..1))?)?; let quantized = if self.n_q > 1 { (quantized + self.rvq_rest.decode(&codes.i((.., 1..))?))? 
} else { quantized }; Ok(quantized) } }
candle/candle-transformers/src/models/mimi/quantization.rs/0
{ "file_path": "candle/candle-transformers/src/models/mimi/quantization.rs", "repo_id": "candle", "token_count": 6873 }
//! MoonDream Model vision-to-text //! //! //! Moondream is a computer-vision model that can answer real-world questions about images. //! It's lightweight with only 1.6B parameters, enabling it to run on mobile phones and edge devices. //! [MoonDream Original Implementation](https://github.com/vikhyat/moondream) //! //! The model consists of: //! - Vision encoder using a ViT-style architecture //! - Text decoder based on Microsoft's Phi model //! - Vision projection module to align vision and text embeddings //! //! # Examples //! //! <img src="https://raw.githubusercontent.com/vikhyat/moondream/main/assets/demo-1.jpg" width="200"> //! //! ```bash //! # download an example image //! wget https://raw.githubusercontent.com/vikhyat/moondream/main/assets/demo-1.jpg //! //! # Now you can run Moondream from the `candle-examples` crate: //! cargo run --example moondream \ //! --release -- \ //! --prompt "What is the girl eating?" //! --image "./demo-1.jpg" //! //! > avavx: false, neon: true, simd128: false, f16c: false //! > temp: 0.00 repeat-penalty: 1.00 repeat-last-n: 64 //! > retrieved the files in 3.395583ms //! > Running on CPU, to run on GPU(metal), build this example with `--features metal` //! > loaded the model in 5.485493792s //! > loaded and encoded the image Tensor[dims 3, 378, 378; f32] in 4.801396417s //! > starting the inference loop //! > The girl is eating a hamburger.< //! > 9 tokens generated (0.68 token/s) //! ``` use crate::models::mixformer::{Config as PhiConfig, MixFormerSequentialForCausalLM as PhiModel}; use crate::models::with_tracing::{layer_norm, linear_b, LayerNorm, Linear}; use candle::{IndexOp, Module, Result, Tensor, D}; use candle_nn::VarBuilder; #[derive(Debug, Clone, serde::Deserialize)] pub struct Config { pub phi_config: PhiConfig, pub vision_config: VisionConfig, } impl Config { pub fn v2() -> Self { Self { phi_config: PhiConfig::v1_5(), vision_config: VisionConfig::v2(), } } } fn scaled_dot_product_attention(q: &Tensor, k: &Tensor, v: &Tensor) -> Result<Tensor> { let dim = q.dim(D::Minus1)?; let scale_factor = 1.0 / (dim as f64).sqrt(); let attn_weights = (q.matmul(&k.t()?)? 
* scale_factor)?; candle_nn::ops::softmax_last_dim(&attn_weights)?.matmul(v) } #[derive(Debug, Clone, PartialEq, serde::Deserialize)] pub struct VisionConfig { pub(crate) image_embedding_dim: usize, pub(crate) model_dim: usize, pub(crate) hidden_dim: usize, pub(crate) hidden_features: usize, pub(crate) embed_len: usize, pub(crate) embed_dim: usize, pub(crate) num_blocks: usize, pub(crate) num_heads: usize, pub(crate) act: candle_nn::Activation, } impl VisionConfig { pub fn v2() -> Self { Self { image_embedding_dim: 1152, model_dim: 2048, hidden_dim: 2048 * 4, hidden_features: 4304, embed_len: 729, embed_dim: 1152, num_blocks: 27, num_heads: 16, act: candle_nn::Activation::GeluPytorchTanh, } } } #[derive(Debug, Clone)] struct LinearPatchEmbedding { linear: Linear, } impl LinearPatchEmbedding { fn new(vb: VarBuilder) -> Result<Self> { let linear = linear_b(588, 1152, true, vb.pp("linear"))?; Ok(Self { linear }) } } impl Module for LinearPatchEmbedding { fn forward(&self, xs: &Tensor) -> Result<Tensor> { xs.apply(&self.linear) } } #[derive(Debug, Clone)] struct Attention { num_heads: usize, head_dim: usize, qkv: Linear, proj: Linear, span: tracing::Span, } impl Attention { pub fn new(vb: VarBuilder, dim: usize, num_heads: usize) -> Result<Self> { let qkv = linear_b(dim, dim * 3, true, vb.pp("qkv"))?; let proj = linear_b(dim, dim, true, vb.pp("proj"))?; Ok(Self { num_heads, head_dim: dim / num_heads, qkv, proj, span: tracing::span!(tracing::Level::TRACE, "vit-attn"), }) } } impl Module for Attention { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let (b, n, c) = xs.dims3()?; let qkv = xs .apply(&self.qkv)? .reshape((b, n, 3, self.num_heads, self.head_dim))? .permute((2, 0, 3, 1, 4))?; let (q, k, v) = ( qkv.i(0)?.contiguous()?, qkv.i(1)?.contiguous()?, qkv.i(2)?.contiguous()?, ); scaled_dot_product_attention(&q, &k, &v)? .transpose(1, 2)? .reshape((b, n, c))? 
.apply(&self.proj) } } #[derive(Debug, Clone)] struct VitBlock { attn: Attention, mlp: Mlp, norm1: LayerNorm, norm2: LayerNorm, span: tracing::Span, } impl VitBlock { fn new(vb: VarBuilder, dim: usize, num_heads: usize, cfg: &VisionConfig) -> Result<Self> { let attn = Attention::new(vb.pp("attn"), dim, num_heads)?; let mlp = Mlp::new(vb.pp("mlp"), dim, cfg.hidden_features, dim, cfg.act)?; let norm1 = layer_norm(dim, 1e-5, vb.pp("norm1"))?; let norm2 = layer_norm(dim, 1e-5, vb.pp("norm2"))?; Ok(Self { attn, mlp, norm1, norm2, span: tracing::span!(tracing::Level::TRACE, "vit-block"), }) } } impl Module for VitBlock { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let ys = xs.apply(&self.norm1)?.apply(&self.attn)?; let xs = (xs + &ys)?; let ys = xs.apply(&self.norm2)?.apply(&self.mlp)?; let xs = (&xs + &ys)?; Ok(xs) } } #[derive(Debug, Clone)] struct VisionTransformer { patch_embed: LinearPatchEmbedding, pos_embed: Tensor, blocks: Vec<VitBlock>, norm: LayerNorm, span: tracing::Span, } impl VisionTransformer { fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> { let patch_embed = LinearPatchEmbedding::new(vb.pp("patch_embed"))?; let pos_embed = vb.get((1, cfg.embed_len, cfg.embed_dim), "pos_embed")?; let blocks = (0..cfg.num_blocks) .map(|i| { VitBlock::new( vb.pp(format!("blocks.{}", i)), cfg.embed_dim, cfg.num_heads, cfg, ) }) .collect::<Result<_>>()?; let norm = layer_norm(cfg.embed_dim, 1e-5, vb.pp("norm"))?; Ok(Self { patch_embed, pos_embed, blocks, norm, span: tracing::span!(tracing::Level::TRACE, "vit"), }) } } impl Module for VisionTransformer { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let mut xs = (&xs.apply(&self.patch_embed)? + &self.pos_embed)?; for block in self.blocks.iter() { xs = xs.apply(block)?; } xs.apply(&self.norm) } } #[derive(Debug, Clone)] pub struct Encoder { model: VisionTransformer, } impl Encoder { fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> { let model = VisionTransformer::new(cfg, vb.pp("model.visual"))?; Ok(Self { model }) } } impl Module for Encoder { fn forward(&self, xs: &Tensor) -> Result<Tensor> { xs.apply(&self.model) } } #[derive(Debug, Clone)] struct Mlp { fc1: Linear, act: candle_nn::Activation, fc2: Linear, span: tracing::Span, } impl Mlp { fn new( vb: VarBuilder, in_features: usize, hidden_features: usize, out_features: usize, act: candle_nn::Activation, ) -> Result<Self> { let fc1 = linear_b(in_features, hidden_features, true, vb.pp("fc1"))?; let fc2 = linear_b(hidden_features, out_features, true, vb.pp("fc2"))?; Ok(Self { fc1, act, fc2, span: tracing::span!(tracing::Level::TRACE, "mlp"), }) } } impl Module for Mlp { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); xs.apply(&self.fc1)?.apply(&self.act)?.apply(&self.fc2) } } #[derive(Debug, Clone)] struct VisionProjection { mlp: Mlp, } impl VisionProjection { fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> { let mlp = Mlp::new( vb.pp("mlp"), cfg.image_embedding_dim, cfg.hidden_dim, cfg.model_dim, cfg.act, )?; Ok(Self { mlp }) } } impl Module for VisionProjection { fn forward(&self, xs: &Tensor) -> Result<Tensor> { xs.apply(&self.mlp) } } #[derive(Debug, Clone)] pub struct VisionEncoder { encoder: Encoder, projection: VisionProjection, } impl VisionEncoder { pub fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> { let encoder = Encoder::new(cfg, vb.pp("encoder"))?; let projection = VisionProjection::new(cfg, vb.pp("projection"))?; Ok(Self { encoder, 
projection, }) } } impl Module for VisionEncoder { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let (b, c, hp1, wp2) = xs.dims4()?; let (p1, p2) = (14, 14); let h = hp1 / p1; let w = wp2 / p2; xs.reshape((b, c, h, p1, h, p2))? .permute((0, 2, 4, 1, 3, 5))? .reshape((b, h * w, c * p1 * p2))? .apply(&self.encoder)? .apply(&self.projection) } } #[derive(Debug, Clone)] pub struct Model { pub text_model: PhiModel, pub vision_encoder: VisionEncoder, } impl Model { pub fn new(config: &Config, vb: VarBuilder) -> Result<Self> { let text_model = PhiModel::new_v2(&config.phi_config, vb.pp("text_model"))?; let vision_encoder = VisionEncoder::new(&config.vision_config, vb.pp("vision_encoder"))?; Ok(Self { text_model, vision_encoder, }) } pub fn vision_encoder(&self) -> &VisionEncoder { &self.vision_encoder } pub fn text_model(&mut self) -> &mut PhiModel { &mut self.text_model } }
candle/candle-transformers/src/models/moondream.rs/0
{ "file_path": "candle/candle-transformers/src/models/moondream.rs", "repo_id": "candle", "token_count": 4946 }
//! BLIP model implementation with quantization support. //! //! BLIP is a vision-language model for image understanding and generation tasks. //! This implementation provides quantization for reduced memory and compute. //! //! Key characteristics: //! - Vision encoder using ViT architecture //! - Text decoder using BERT-style transformer //! - Cross-attention between vision and text features //! - Support for 8-bit quantization //! //! References: //! - [BLIP Paper](https://arxiv.org/abs/2201.12086) //! - [Hugging Face Implementation](https://huggingface.co/docs/transformers/model_doc/blip) //! use super::quantized_blip_text as blip_text; use crate::quantized_nn::{layer_norm, linear, Linear}; pub use crate::quantized_var_builder::VarBuilder; use candle::{Module, Result, Tensor, D}; use candle_nn::{Conv2d, Conv2dConfig, LayerNorm}; pub type VisionConfig = super::blip::VisionConfig; pub type Config = super::blip::Config; #[derive(Debug, Clone)] struct VisionEmbeddings { class_embedding: Tensor, patch_embedding: Conv2d, position_embedding: Tensor, } impl VisionEmbeddings { fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> { let class_embedding = vb .get((1, 1, cfg.hidden_size), "class_embedding")? .dequantize(vb.device())?; let conv_cfg = Conv2dConfig { stride: cfg.patch_size, ..Default::default() }; let pe_vb = vb.pp("patch_embedding"); let pe_weight = pe_vb .get( (cfg.hidden_size, 3, cfg.patch_size, cfg.patch_size), "weight", )? .dequantize(vb.device())?; let pe_bias = pe_vb .get(cfg.hidden_size, "bias")? .dequantize(vb.device())?; let patch_embedding = Conv2d::new(pe_weight, Some(pe_bias), conv_cfg); let num_patches1 = cfg.image_size / cfg.patch_size; let num_patches = num_patches1 * num_patches1; let num_positions = num_patches + 1; let position_embedding = vb .get((1, num_positions, cfg.hidden_size), "position_embedding")? .dequantize(vb.device())?; Ok(Self { class_embedding, patch_embedding, position_embedding, }) } } impl Module for VisionEmbeddings { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let target_dtype = xs.dtype(); let b_size = xs.dim(0)?; let patch_embeds = xs.apply(&self.patch_embedding)?.flatten_from(2)?.t()?; let d = self.class_embedding.dim(D::Minus1)?; let class_embeds = self .class_embedding .broadcast_as((b_size, 1, d))? .to_dtype(target_dtype)?; let embeddings = Tensor::cat(&[&class_embeds, &patch_embeds], 1)?; let position_embedding = self.position_embedding.narrow(1, 0, embeddings.dim(1)?)?; embeddings.broadcast_add(&position_embedding) } } #[derive(Debug, Clone)] struct Attention { qkv: Linear, projection: Linear, scale: f64, num_heads: usize, } impl Attention { fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> { let embed_dim = cfg.hidden_size; let num_heads = cfg.num_attention_heads; let head_dim = embed_dim / num_heads; let scale = 1f64 / (head_dim as f64).sqrt(); let qkv = linear(embed_dim, 3 * embed_dim, vb.pp("qkv"))?; let projection = linear(embed_dim, embed_dim, vb.pp("projection"))?; Ok(Self { qkv, projection, scale, num_heads, }) } fn forward(&self, xs: &Tensor, attn_mask: Option<&Tensor>) -> Result<Tensor> { let (b_sz, tgt_len, embed_dim) = xs.dims3()?; let mixed_qkv = xs .apply(&self.qkv)? .reshape((b_sz, tgt_len, 3, self.num_heads, embed_dim / self.num_heads))? 
.permute((2, 0, 3, 1, 4))?; let query = mixed_qkv.get(0)?; let key = mixed_qkv.get(1)?; let value = mixed_qkv.get(2)?; let attention_scores = query.matmul(&key.t()?)?; let attention_scores = (attention_scores * self.scale)?; let attention_probs = candle_nn::ops::softmax_last_dim(&attention_scores)?; let attention_probs = match attn_mask { None => attention_probs, Some(attn_mask) => (attention_probs * attn_mask)?, }; attention_probs .matmul(&value)? .permute((0, 2, 1, 3))? .flatten_from(D::Minus2)? .apply(&self.projection) } } #[derive(Debug, Clone)] #[allow(clippy::upper_case_acronyms)] struct MLP { activation_fn: candle_nn::Activation, fc1: Linear, fc2: Linear, } impl MLP { fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> { let fc1 = linear(cfg.hidden_size, cfg.intermediate_size, vb.pp("fc1"))?; let fc2 = linear(cfg.intermediate_size, cfg.hidden_size, vb.pp("fc2"))?; Ok(Self { activation_fn: cfg.hidden_act, fc1, fc2, }) } } impl Module for MLP { fn forward(&self, xs: &Tensor) -> Result<Tensor> { xs.apply(&self.fc1)? .apply(&self.activation_fn)? .apply(&self.fc2) } } #[derive(Debug, Clone)] struct EncoderLayer { self_attn: Attention, layer_norm1: LayerNorm, mlp: MLP, layer_norm2: LayerNorm, } impl EncoderLayer { fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> { let embed_dim = cfg.hidden_size; let self_attn = Attention::new(cfg, vb.pp("self_attn"))?; let layer_norm1 = layer_norm(embed_dim, cfg.layer_norm_eps, vb.pp("layer_norm1"))?; let layer_norm2 = layer_norm(embed_dim, cfg.layer_norm_eps, vb.pp("layer_norm2"))?; let mlp = MLP::new(cfg, vb.pp("mlp"))?; Ok(Self { self_attn, layer_norm1, mlp, layer_norm2, }) } fn forward(&self, xs: &Tensor, attention_mask: Option<&Tensor>) -> Result<Tensor> { let residual = xs; let xs = xs.apply(&self.layer_norm1)?; let xs = self.self_attn.forward(&xs, attention_mask)?; let xs = (xs + residual)?; let residual = &xs; let xs = xs.apply(&self.layer_norm2)?.apply(&self.mlp)?; xs + residual } } #[derive(Debug, Clone)] struct Encoder { layers: Vec<EncoderLayer>, } impl Encoder { fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> { let mut layers = Vec::with_capacity(cfg.num_hidden_layers); let vb = vb.pp("layers"); for i in 0..cfg.num_hidden_layers { let layer = EncoderLayer::new(cfg, vb.pp(i))?; layers.push(layer) } Ok(Self { layers }) } fn forward(&self, xs: &Tensor, attention_mask: Option<&Tensor>) -> Result<Tensor> { let mut xs = xs.clone(); for layer in self.layers.iter() { xs = layer.forward(&xs, attention_mask)? } Ok(xs) } } #[derive(Debug, Clone)] pub struct VisionModel { embeddings: VisionEmbeddings, encoder: Encoder, post_layernorm: LayerNorm, } impl VisionModel { fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> { let embeddings = VisionEmbeddings::new(cfg, vb.pp("embeddings"))?; let encoder = Encoder::new(cfg, vb.pp("encoder"))?; let post_layernorm = layer_norm(cfg.hidden_size, cfg.layer_norm_eps, vb.pp("post_layernorm"))?; Ok(Self { embeddings, encoder, post_layernorm, }) } } impl Module for VisionModel { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let xs = xs.apply(&self.embeddings)?; let encoder_outputs = self.encoder.forward(&xs, None)?; // Return the last hidden state rather than pooled outputs. 
encoder_outputs.apply(&self.post_layernorm) } } #[derive(Debug, Clone)] pub struct BlipForConditionalGeneration { vision_model: VisionModel, text_decoder: blip_text::TextLMHeadModel, } impl BlipForConditionalGeneration { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let vision_model = VisionModel::new(&cfg.vision_config, vb.pp("vision_model"))?; let text_decoder = blip_text::TextLMHeadModel::new(&cfg.text_config, vb.pp("text_decoder"))?; Ok(Self { vision_model, text_decoder, }) } pub fn vision_model(&self) -> &VisionModel { &self.vision_model } pub fn text_decoder(&mut self) -> &mut blip_text::TextLMHeadModel { &mut self.text_decoder } pub fn reset_kv_cache(&mut self) { self.text_decoder.reset_kv_cache(); } }
candle/candle-transformers/src/models/quantized_blip.rs/0
{ "file_path": "candle/candle-transformers/src/models/quantized_blip.rs", "repo_id": "candle", "token_count": 4186 }
//! T5 model implementation with quantization support. //! //! T5 is an encoder-decoder model pre-trained on a multi-task mixture of supervised //! and unsupervised tasks. This implementation provides quantization for reduced //! memory and compute requirements. //! //! Key characteristics: //! - Encoder-decoder architecture //! - Layer normalization //! - Relative positional encodings //! - Support for 8-bit quantization //! //! References: //! - 📝 [T5 Paper](https://arxiv.org/abs/1910.10683) //! - 🤗 [Model Card](https://huggingface.co/t5-base) //! - 🤗 Original model from [T5](https://github.com/huggingface/transformers/blob/main/src/transformers/models/t5/modeling_t5.py) use crate::models::t5::{deserialize_feed_forward_proj_activation, ActivationWithOptionalGating}; use crate::models::with_tracing::QMatMul; use crate::quantized_nn::Embedding; pub use crate::quantized_var_builder::VarBuilder; use candle::{DType, Device, Module, Result, Tensor, D}; use candle_nn::Activation; use serde::Deserialize; use std::sync::Arc; fn default_relative_attention_max_distance() -> usize { 128 } fn default_is_decoder() -> bool { false } fn default_use_cache() -> bool { true } fn default_tie_word_embeddings() -> bool { true } fn get_mask(size: usize, device: &Device) -> Result<Tensor> { let mask: Vec<_> = (0..size) .flat_map(|i| (0..size).map(move |j| u8::from(j > i))) .collect(); Tensor::from_slice(&mask, (size, size), device) } fn masked_fill(on_false: &Tensor, mask: &Tensor, on_true: f32) -> Result<Tensor> { let shape = mask.shape(); let on_true = Tensor::new(on_true, on_false.device())?.broadcast_as(shape.dims())?; let m = mask.where_cond(&on_true, on_false)?; Ok(m) } #[derive(Debug, Clone, PartialEq, Deserialize)] pub struct Config { vocab_size: usize, d_model: usize, d_kv: usize, d_ff: usize, num_layers: usize, num_decoder_layers: Option<usize>, num_heads: usize, relative_attention_num_buckets: usize, #[serde(default = "default_relative_attention_max_distance")] relative_attention_max_distance: usize, dropout_rate: f64, layer_norm_epsilon: f64, initializer_factor: f64, #[serde(default, deserialize_with = "deserialize_feed_forward_proj_activation")] pub feed_forward_proj: ActivationWithOptionalGating, #[serde(default = "default_tie_word_embeddings")] tie_word_embeddings: bool, #[serde(default = "default_is_decoder")] is_decoder: bool, is_encoder_decoder: bool, #[serde(default = "default_use_cache")] pub use_cache: bool, pub pad_token_id: usize, pub eos_token_id: usize, pub decoder_start_token_id: Option<usize>, } impl Default for Config { fn default() -> Self { Self { vocab_size: 32128, d_model: 512, d_kv: 64, d_ff: 2048, num_layers: 6, num_decoder_layers: None, num_heads: 8, relative_attention_num_buckets: 32, relative_attention_max_distance: 128, dropout_rate: 0.1, layer_norm_epsilon: 1e-6, initializer_factor: 1.0, feed_forward_proj: ActivationWithOptionalGating { gated: false, activation: Activation::Relu, }, tie_word_embeddings: true, is_decoder: false, is_encoder_decoder: true, use_cache: true, pad_token_id: 0, eos_token_id: 1, decoder_start_token_id: Some(0), } } } #[derive(Debug, Clone)] struct T5LayerNorm { weight: Tensor, variance_epsilon: f64, span: tracing::Span, } impl T5LayerNorm { fn load(h: usize, eps: f64, vb: VarBuilder) -> Result<Self> { let weight = vb.get(h, "weight")?.dequantize(vb.device())?; Ok(Self { weight, variance_epsilon: eps, span: tracing::span!(tracing::Level::TRACE, "layer-norm"), }) } } impl Module for T5LayerNorm { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let 
_enter = self.span.enter(); let dtype = xs.dtype(); let xs_f32 = xs.to_dtype(DType::F32)?; // variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) let variance = xs_f32.sqr()?.mean_keepdim(D::Minus1)?; let xs = xs.broadcast_div(&(variance + self.variance_epsilon)?.sqrt()?)?; let xs = xs.to_dtype(dtype)?; let xs = xs.broadcast_mul(&self.weight)?; Ok(xs) } } #[derive(Debug, Clone)] struct T5DenseActDense { wi: QMatMul, wo: QMatMul, act: Activation, span: tracing::Span, } impl T5DenseActDense { fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { let wi = QMatMul::new(cfg.d_model, cfg.d_ff, vb.pp("wi"))?; let wo = QMatMul::new(cfg.d_ff, cfg.d_model, vb.pp("wo"))?; Ok(Self { wi, wo, act: Activation::Relu, span: tracing::span!(tracing::Level::TRACE, "dense-act-dense"), }) } } impl Module for T5DenseActDense { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let xs = self.wi.forward(xs)?; let xs = self.act.forward(&xs)?; let xs = self.wo.forward(&xs)?; Ok(xs) } } #[derive(Debug, Clone)] struct T5DenseGatedActDense { wi_0: QMatMul, wi_1: QMatMul, wo: QMatMul, act: Activation, span: tracing::Span, } impl T5DenseGatedActDense { fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { let wi_0 = QMatMul::new(cfg.d_model, cfg.d_ff, vb.pp("wi_0"))?; let wi_1 = QMatMul::new(cfg.d_model, cfg.d_ff, vb.pp("wi_1"))?; let wo = QMatMul::new(cfg.d_ff, cfg.d_model, vb.pp("wo"))?; Ok(Self { wi_0, wi_1, wo, act: cfg.feed_forward_proj.activation, span: tracing::span!(tracing::Level::TRACE, "dense-gated-act-dense"), }) } } impl Module for T5DenseGatedActDense { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let hidden_gelu = self.act.forward(&self.wi_0.forward(xs)?)?; let hidden_linear = self.wi_1.forward(xs)?; let xs = hidden_gelu.broadcast_mul(&hidden_linear)?; let xs = self.wo.forward(&xs)?; Ok(xs) } } #[derive(Debug, Clone)] struct T5LayerFF { dense_act: Option<T5DenseActDense>, gated_dense_act: Option<T5DenseGatedActDense>, layer_norm: T5LayerNorm, span: tracing::Span, } impl T5LayerFF { fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { let layer_norm = T5LayerNorm::load(cfg.d_model, cfg.layer_norm_epsilon, vb.pp("layer_norm"))?; let (dense_act, gated_dense_act) = if cfg.feed_forward_proj.gated { ( None, Some(T5DenseGatedActDense::load(vb.pp("DenseReluDense"), cfg)?), ) } else { ( Some(T5DenseActDense::load(vb.pp("DenseReluDense"), cfg)?), None, ) }; Ok(Self { dense_act, gated_dense_act, layer_norm, span: tracing::span!(tracing::Level::TRACE, "layer-ff"), }) } } impl Module for T5LayerFF { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let ys = self.layer_norm.forward(xs)?; let ys = match &self.dense_act { Some(dense_act) => dense_act.forward(&ys)?, None => self.gated_dense_act.as_ref().unwrap().forward(&ys)?, }; let xs = (xs + ys)?; Ok(xs) } } #[derive(Debug, Clone)] struct T5Attention { q: QMatMul, k: QMatMul, v: QMatMul, o: QMatMul, n_heads: usize, d_kv: usize, relative_attention_bias: Option<Embedding>, relative_attention_num_buckets: usize, relative_attention_max_distance: usize, inner_dim: usize, use_cache: bool, kv_cache: Option<(Tensor, Tensor)>, span: tracing::Span, span_cache: tracing::Span, span_mm: tracing::Span, span_sm: tracing::Span, } impl T5Attention { fn load( has_relative_attention_bias: bool, decoder: bool, vb: VarBuilder, cfg: &Config, ) -> Result<Self> { let inner_dim = cfg.num_heads * cfg.d_kv; let q = QMatMul::new(cfg.d_model, inner_dim, vb.pp("q"))?; 
let k = QMatMul::new(cfg.d_model, inner_dim, vb.pp("k"))?; let v = QMatMul::new(cfg.d_model, inner_dim, vb.pp("v"))?; let o = QMatMul::new(inner_dim, cfg.d_model, vb.pp("o"))?; let relative_attention_bias = if has_relative_attention_bias { let emb = Embedding::new( cfg.relative_attention_num_buckets, cfg.num_heads, vb.pp("relative_attention_bias"), )?; Some(emb) } else { None }; Ok(Self { q, k, v, o, n_heads: cfg.num_heads, d_kv: cfg.d_kv, relative_attention_bias, relative_attention_num_buckets: cfg.relative_attention_num_buckets, relative_attention_max_distance: cfg.relative_attention_max_distance, inner_dim, use_cache: cfg.use_cache && decoder, kv_cache: None, span: tracing::span!(tracing::Level::TRACE, "attention"), span_cache: tracing::span!(tracing::Level::TRACE, "attention-cache"), span_mm: tracing::span!(tracing::Level::TRACE, "attention-mm"), span_sm: tracing::span!(tracing::Level::TRACE, "attention-sm"), }) } fn forward( &mut self, xs: &Tensor, position_bias: Option<&Tensor>, key_value_states: Option<&Tensor>, mask: Option<&Tensor>, ) -> Result<(Tensor, Option<Tensor>)> { // Performs Self-attention (if key_value_states is None) or attention // over source sentence (provided by key_value_states). let _enter = self.span.enter(); let kv_input = match key_value_states { None => xs, Some(key_value_states) => key_value_states, }; let (b_sz, q_len) = (xs.dim(0)?, xs.dim(1)?); let kv_len = kv_input.dim(1)?; let q = self.q.forward(xs)?; let k = self.k.forward(kv_input)?; let v = self.v.forward(kv_input)?; let q = q .reshape((b_sz, q_len, self.n_heads, self.d_kv))? .transpose(1, 2)? .contiguous()?; let mut k = k .reshape((b_sz, kv_len, self.n_heads, self.d_kv))? .transpose(1, 2)?; let mut v = v .reshape((b_sz, kv_len, self.n_heads, self.d_kv))? .transpose(1, 2)?; if self.use_cache && key_value_states.is_none() { let _enter = self.span_cache.enter(); if let Some((kv_cache_k, kv_cache_v)) = &self.kv_cache { k = Tensor::cat(&[kv_cache_k, &k], 2)?; v = Tensor::cat(&[kv_cache_v, &v], 2)?; }; self.kv_cache = Some((k.clone(), v.clone())); }; let k = k.contiguous()?; let v = v.contiguous()?; // TODO: Use flash_attn. let scores = { let _enter = self.span_mm.enter(); q.matmul(&k.t()?)? }; let scores = match mask { None => scores, Some(mask) => masked_fill( &scores, &mask .unsqueeze(0)? .unsqueeze(0)? .repeat((b_sz, self.n_heads))?, f32::NEG_INFINITY, )?, }; let (scores, position_bias) = match position_bias { Some(position_bias) => ( scores.broadcast_add(position_bias)?, Some(position_bias.clone()), ), None => match &self.relative_attention_bias { None => (scores, None), Some(relative_attention_bias) => { // This only handles the bidirectional case. 
let kv_len = k.dim(2)?; let (q_start, q_end) = match self.use_cache { true => ((kv_len - q_len) as u32, kv_len as u32), false => (0_u32, kv_len as u32), }; let num_buckets = self.relative_attention_num_buckets as u32 / 2; let max_exact = num_buckets / 2; let relative_position = (q_start..q_end) .map(|i| { (0..kv_len as u32) .map(|j| { if i < j { if j - i < max_exact { j - i + num_buckets } else { let b = f32::log( (j - i) as f32 / max_exact as f32, self.relative_attention_max_distance as f32 / max_exact as f32, ) * (num_buckets - max_exact) as f32; u32::min( max_exact + num_buckets + b as u32, self.relative_attention_num_buckets as u32 - 1, ) } } else if i - j < max_exact { i - j } else { let b = f32::log( (i - j) as f32 / max_exact as f32, self.relative_attention_max_distance as f32 / max_exact as f32, ) * (num_buckets - max_exact) as f32; max_exact + b as u32 } }) .collect::<Vec<u32>>() }) .collect::<Vec<Vec<_>>>(); let relative_buckets = Tensor::new(relative_position, q.device())?; let position_bias = relative_attention_bias .forward(&relative_buckets)? .permute((2, 0, 1))? .unsqueeze(0)?; (scores.broadcast_add(&position_bias)?, Some(position_bias)) // TODO: position_bias_masked? } }, }; let attn_weights = { let _enter = self.span_sm.enter(); candle_nn::ops::softmax_last_dim(&scores)? }; let attn_output = attn_weights.matmul(&v)?; let attn_output = attn_output .transpose(1, 2)? .reshape((b_sz, q_len, self.inner_dim))?; let attn_output = self.o.forward(&attn_output)?; Ok((attn_output, position_bias)) } fn clear_kv_cache(&mut self) { self.kv_cache = None } } #[derive(Debug, Clone)] struct T5LayerSelfAttention { self_attention: T5Attention, layer_norm: T5LayerNorm, span: tracing::Span, } impl T5LayerSelfAttention { fn load(h: bool, d: bool, vb: VarBuilder, cfg: &Config) -> Result<Self> { let self_attention = T5Attention::load(h, d, vb.pp("SelfAttention"), cfg)?; let layer_norm = T5LayerNorm::load(cfg.d_model, cfg.layer_norm_epsilon, vb.pp("layer_norm"))?; Ok(Self { self_attention, layer_norm, span: tracing::span!(tracing::Level::TRACE, "self-attn"), }) } fn forward( &mut self, xs: &Tensor, position_bias: Option<&Tensor>, mask: Option<&Tensor>, ) -> Result<(Tensor, Option<Tensor>)> { let _enter = self.span.enter(); let normed_xs = self.layer_norm.forward(xs)?; let (ys, position_bias) = self.self_attention .forward(&normed_xs, position_bias, None, mask)?; let ys = (xs + ys)?; Ok((ys, position_bias)) } fn clear_kv_cache(&mut self) { self.self_attention.clear_kv_cache() } } #[derive(Debug, Clone)] struct T5LayerCrossAttention { cross_attention: T5Attention, layer_norm: T5LayerNorm, span: tracing::Span, } impl T5LayerCrossAttention { fn load(decoder: bool, vb: VarBuilder, cfg: &Config) -> Result<Self> { let cross_attention = T5Attention::load(false, decoder, vb.pp("EncDecAttention"), cfg)?; let layer_norm = T5LayerNorm::load(cfg.d_model, cfg.layer_norm_epsilon, vb.pp("layer_norm"))?; Ok(Self { cross_attention, layer_norm, span: tracing::span!(tracing::Level::TRACE, "cross-attn"), }) } fn forward( &mut self, hidden_states: &Tensor, position_bias: Option<&Tensor>, key_value_states: &Tensor, ) -> Result<(Tensor, Option<Tensor>)> { let _enter = self.span.enter(); let normed_hidden_states = self.layer_norm.forward(hidden_states)?; let (ys, position_bias) = self.cross_attention.forward( &normed_hidden_states, position_bias, Some(key_value_states), None, )?; let ys = (hidden_states + ys)?; Ok((ys, position_bias)) } fn clear_kv_cache(&mut self) { self.cross_attention.clear_kv_cache() } } 
#[derive(Debug, Clone)] struct T5Block { self_attn: T5LayerSelfAttention, cross_attn: Option<T5LayerCrossAttention>, ff: T5LayerFF, span: tracing::Span, } impl T5Block { fn load( has_relative_attention_bias: bool, decoder: bool, vb: VarBuilder, cfg: &Config, ) -> Result<Self> { let vb = vb.pp("layer"); let self_attn = T5LayerSelfAttention::load(has_relative_attention_bias, decoder, vb.pp("0"), cfg)?; let cross_attn = if cfg.is_decoder { Some(T5LayerCrossAttention::load(decoder, vb.pp("1"), cfg)?) } else { None }; let ff_i = if cross_attn.is_some() { 2 } else { 1 }; let ff = T5LayerFF::load(vb.pp(ff_i), cfg)?; Ok(Self { self_attn, cross_attn, ff, span: tracing::span!(tracing::Level::TRACE, "block"), }) } fn forward( &mut self, xs: &Tensor, position_bias: Option<&Tensor>, encoder_hidden_states: Option<&Tensor>, ) -> Result<(Tensor, Option<Tensor>)> { let _enter = self.span.enter(); // TODO: Cache masks let mask = match self.cross_attn.is_some() { true => { let mask_len = xs.dim(1)?; // If the input seq length is 1, no need for a mask, this is also helpful to avoid shape // issues when using the KV cache in the decoder. if mask_len <= 1 { None } else { Some(get_mask(mask_len, xs.device())?) } } false => None, }; let (mut xs, position_bias) = self.self_attn.forward(xs, position_bias, mask.as_ref())?; // TODO: clamp for f16? if let Some(cross_attn) = &mut self.cross_attn { (xs, _) = cross_attn.forward(&xs, None, encoder_hidden_states.unwrap())?; // TODO: clamp for f16? } let xs = self.ff.forward(&xs)?; // TODO: clamp for f16? Ok((xs, position_bias)) } fn clear_kv_cache(&mut self) { self.self_attn.clear_kv_cache(); self.cross_attn.iter_mut().for_each(|c| c.clear_kv_cache()); } } #[derive(Debug, Clone)] struct T5Stack { block: Vec<T5Block>, shared: Arc<Embedding>, final_layer_norm: T5LayerNorm, span: tracing::Span, } impl T5Stack { fn load(decoder: bool, vb: VarBuilder, shared: &Arc<Embedding>, cfg: &Config) -> Result<Self> { let block = (0..cfg.num_layers) .map(|i| T5Block::load(i == 0, decoder, vb.pp(format!("block.{i}")), cfg)) .collect::<Result<Vec<_>>>()?; let final_layer_norm = T5LayerNorm::load( cfg.d_model, cfg.layer_norm_epsilon, vb.pp("final_layer_norm"), )?; Ok(Self { block, shared: shared.clone(), final_layer_norm, span: tracing::span!(tracing::Level::TRACE, "stack"), }) } fn forward( &mut self, input_ids: &Tensor, encoder_hidden_states: Option<&Tensor>, ) -> Result<Tensor> { let _enter = self.span.enter(); let input_embeds = self.shared.as_ref().forward(input_ids)?; let mut hidden_states = input_embeds; let mut position_bias = None; for block in self.block.iter_mut() { (hidden_states, position_bias) = block.forward( &hidden_states, position_bias.as_ref(), encoder_hidden_states, )? 
} self.final_layer_norm.forward(&hidden_states) } fn clear_kv_cache(&mut self) { self.block.iter_mut().for_each(|b| b.clear_kv_cache()) } } #[derive(Debug, Clone)] pub struct T5EncoderModel { encoder: T5Stack, device: Device, span: tracing::Span, } impl T5EncoderModel { pub fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { let shared_vb = if vb.contains_key("shared.weight") { vb.pp("shared") } else { vb.pp("decoder").pp("embed_tokens") }; let shared = Embedding::new(cfg.vocab_size, cfg.d_model, shared_vb)?; let shared = Arc::new(shared); let encoder = T5Stack::load(false, vb.pp("encoder"), &shared, cfg)?; Ok(Self { encoder, device: vb.device().clone(), span: tracing::span!(tracing::Level::TRACE, "encoder"), }) } pub fn forward(&mut self, input_ids: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); self.encoder.forward(input_ids, None) } pub fn device(&self) -> &Device { &self.device } pub fn clear_kv_cache(&mut self) { self.encoder.clear_kv_cache() } } #[derive(Debug, Clone)] pub struct T5ForConditionalGeneration { encoder: T5Stack, decoder: T5Stack, d_model: usize, tie_word_embeddings: bool, lm_head: Option<QMatMul>, shared: Arc<Embedding>, device: Device, span_decode: tracing::Span, span_decode_head: tracing::Span, } impl T5ForConditionalGeneration { pub fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { assert!(cfg.is_encoder_decoder); let d_model = cfg.d_model; let shared_vb = if vb.contains_key("shared.weight") { vb.pp("shared") } else { vb.pp("decoder").pp("embed_tokens") }; let shared = Embedding::new(cfg.vocab_size, cfg.d_model, shared_vb)?; let shared = Arc::new(shared); let mut encoder_cfg = cfg.clone(); encoder_cfg.is_decoder = false; encoder_cfg.use_cache = false; encoder_cfg.is_encoder_decoder = false; let encoder = T5Stack::load(false, vb.pp("encoder"), &shared, &encoder_cfg)?; let mut decoder_cfg = cfg.clone(); decoder_cfg.is_decoder = true; decoder_cfg.is_encoder_decoder = false; decoder_cfg.num_layers = cfg.num_decoder_layers.unwrap_or(cfg.num_layers); let decoder = T5Stack::load(true, vb.pp("decoder"), &shared, &decoder_cfg)?; let tie_word_embeddings = cfg.tie_word_embeddings; let lm_head = if tie_word_embeddings { None } else { Some(QMatMul::new(cfg.d_model, cfg.vocab_size, vb.pp("lm_head"))?) }; Ok(Self { encoder, decoder, d_model, tie_word_embeddings, lm_head, shared, device: vb.device().clone(), span_decode: tracing::span!(tracing::Level::TRACE, "decode"), span_decode_head: tracing::span!(tracing::Level::TRACE, "decode-head"), }) } pub fn encode(&mut self, input_ids: &Tensor) -> Result<Tensor> { self.encoder.forward(input_ids, None) } pub fn decode( &mut self, decoder_input_ids: &Tensor, encoder_output: &Tensor, ) -> Result<Tensor> { let _enter = self.span_decode.enter(); let decoder_output = self .decoder .forward(decoder_input_ids, Some(encoder_output))?; let scaling_factor = if self.tie_word_embeddings { // Rescale output before projecting on vocab // See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586 (self.d_model as f64).sqrt() } else { 1.0 }; let sequence_output = ((decoder_output .narrow(1, decoder_output.dim(1)? - 1, 1)? .squeeze(1)?) 
* scaling_factor)?; let output = { let _enter = self.span_decode_head.enter(); match self.lm_head { None => sequence_output.matmul(&self.shared.embeddings().t()?)?, Some(ref lm_head) => lm_head.forward(&sequence_output)?, } }; Ok(output) } pub fn forward(&mut self, input_ids: &Tensor, decoder_input_ids: &Tensor) -> Result<Tensor> { let encoder_output = self.encode(input_ids)?; self.decode(decoder_input_ids, &encoder_output) } pub fn device(&self) -> &Device { &self.device } pub fn clear_kv_cache(&mut self) { self.encoder.clear_kv_cache(); self.decoder.clear_kv_cache(); } }
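For reference, a minimal usage sketch of the public API above, written for this note rather than taken from the repository: it assumes a GGUF weight file loadable through the re-exported quantized `VarBuilder::from_gguf`, a `config.json` string, `anyhow`/`serde_json` for plumbing, and placeholder token ids instead of a real tokenizer.

// Hedged sketch: greedy decoding with the quantized T5 model defined above.
use candle::{DType, Device, Tensor};
use candle_transformers::models::quantized_t5 as t5;

fn t5_greedy_decode_sketch(weights: &std::path::Path, config_json: &str) -> anyhow::Result<Vec<u32>> {
    let device = Device::Cpu;
    let cfg: t5::Config = serde_json::from_str(config_json)?;
    // Assumed entry point for GGUF weights on the re-exported quantized VarBuilder.
    let vb = t5::VarBuilder::from_gguf(weights, &device)?;
    let mut model = t5::T5ForConditionalGeneration::load(vb, &cfg)?;

    // Encode the (already tokenized) prompt once; zeros stand in for real token ids.
    let input_ids = Tensor::zeros((1, 8), DType::U32, &device)?;
    let encoder_output = model.encode(&input_ids)?;

    let start = cfg.decoder_start_token_id.unwrap_or(cfg.pad_token_id) as u32;
    let mut tokens = vec![start];
    for step in 0..64 {
        // With the decoder KV cache enabled (the default), only the newly produced
        // token needs to be fed after the first step.
        let ctx = if step == 0 { &tokens[..] } else { &tokens[tokens.len() - 1..] };
        let decoder_ids = Tensor::new(ctx, &device)?.unsqueeze(0)?;
        let logits = model.decode(&decoder_ids, &encoder_output)?; // (1, vocab_size)
        let next = logits.squeeze(0)?.argmax(0)?.to_scalar::<u32>()?;
        if next as usize == cfg.eos_token_id {
            break;
        }
        tokens.push(next);
    }
    model.clear_kv_cache();
    Ok(tokens)
}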
candle/candle-transformers/src/models/quantized_t5.rs/0
{ "file_path": "candle/candle-transformers/src/models/quantized_t5.rs", "repo_id": "candle", "token_count": 14169 }
//! Siglip model implementation. //! //! Siglip architecture combining vision and language for zero-shot tasks. //! //! References: //! - 🤗 [Model Card](https://huggingface.co/google/siglip-base-patch16-224) //! use crate::models::clip::div_l2_norm; use candle::{IndexOp, Module, Result, Tensor, D}; use candle_nn::{layer_norm, linear, LayerNorm, Linear, VarBuilder}; // https://github.com/huggingface/transformers/blob/2e24ee4dfa39cc0bc264b89edbccc373c8337086/src/transformers/models/siglip/configuration_siglip.py#L27 #[derive(serde::Deserialize, Clone, Debug)] pub struct TextConfig { pub vocab_size: usize, pub hidden_size: usize, pub intermediate_size: usize, pub num_hidden_layers: usize, pub num_attention_heads: usize, pub max_position_embeddings: usize, pub hidden_act: candle_nn::Activation, pub layer_norm_eps: f64, pub pad_token_id: u32, pub bos_token_id: u32, pub eos_token_id: u32, } // https://github.com/huggingface/transformers/blob/2e24ee4dfa39cc0bc264b89edbccc373c8337086/src/transformers/models/siglip/configuration_siglip.py#L132 #[derive(serde::Deserialize, Clone, Debug)] pub struct VisionConfig { pub hidden_size: usize, pub intermediate_size: usize, pub num_hidden_layers: usize, pub num_attention_heads: usize, pub num_channels: usize, pub image_size: usize, pub patch_size: usize, pub hidden_act: candle_nn::Activation, pub layer_norm_eps: f64, } trait TransformerConfig { fn hidden_size(&self) -> usize; fn intermediate_size(&self) -> usize; fn num_attention_heads(&self) -> usize; fn num_hidden_layers(&self) -> usize; fn layer_norm_eps(&self) -> f64; fn hidden_act(&self) -> candle_nn::Activation; } impl TransformerConfig for TextConfig { fn hidden_size(&self) -> usize { self.hidden_size } fn intermediate_size(&self) -> usize { self.intermediate_size } fn num_attention_heads(&self) -> usize { self.num_attention_heads } fn num_hidden_layers(&self) -> usize { self.num_hidden_layers } fn layer_norm_eps(&self) -> f64 { self.layer_norm_eps } fn hidden_act(&self) -> candle_nn::Activation { self.hidden_act } } impl TransformerConfig for VisionConfig { fn hidden_size(&self) -> usize { self.hidden_size } fn intermediate_size(&self) -> usize { self.intermediate_size } fn num_attention_heads(&self) -> usize { self.num_attention_heads } fn num_hidden_layers(&self) -> usize { self.num_hidden_layers } fn layer_norm_eps(&self) -> f64 { self.layer_norm_eps } fn hidden_act(&self) -> candle_nn::Activation { self.hidden_act } } impl VisionConfig { pub fn paligemma_3b_224() -> Self { Self { // https://huggingface.co/google/paligemma-3b-pt-224/blob/main/config.json patch_size: 14, num_attention_heads: 16, num_hidden_layers: 27, hidden_size: 1152, intermediate_size: 4304, image_size: 224, // num_image_tokens: (224 / 14)^2 = 256 // Default values. num_channels: 3, hidden_act: candle_nn::Activation::GeluPytorchTanh, layer_norm_eps: 1e-6, } } pub fn paligemma_3b_448() -> Self { Self { // https://huggingface.co/google/paligemma-3b-pt-448/blob/main/config.json patch_size: 14, num_attention_heads: 16, num_hidden_layers: 27, hidden_size: 1152, intermediate_size: 4304, image_size: 448, // num_image_tokens: (448 / 14)^2 = 1024 // Default values. 
num_channels: 3, hidden_act: candle_nn::Activation::GeluPytorchTanh, layer_norm_eps: 1e-6, } } pub fn paligemma_3b_896() -> Self { Self { // https://huggingface.co/google/paligemma-3b-pt-448/blob/main/config.json patch_size: 14, num_attention_heads: 16, num_hidden_layers: 27, hidden_size: 1152, intermediate_size: 4304, image_size: 896, // num_image_tokens: (896 / 14)^2 = 4096 // Default values. num_channels: 3, hidden_act: candle_nn::Activation::GeluPytorchTanh, layer_norm_eps: 1e-6, } } pub fn num_patches(&self) -> usize { (self.image_size / self.patch_size).pow(2) } } // https://github.com/huggingface/transformers/blob/2e24ee4dfa39cc0bc264b89edbccc373c8337086/src/transformers/models/siglip/configuration_siglip.py#L228 #[derive(serde::Deserialize, Clone, Debug)] pub struct Config { pub text_config: TextConfig, pub vision_config: VisionConfig, } impl Config { pub fn base_patch16_224() -> Self { let text_config = TextConfig { // https://huggingface.co/google/siglip-base-patch16-224/blob/main/config.json hidden_size: 768, intermediate_size: 3072, num_attention_heads: 12, vocab_size: 32000, // Default values. pad_token_id: 1, bos_token_id: 49406, eos_token_id: 49407, layer_norm_eps: 1e-6, hidden_act: candle_nn::Activation::GeluPytorchTanh, max_position_embeddings: 64, num_hidden_layers: 12, }; let vision_config = VisionConfig { patch_size: 16, // Default values. hidden_size: 768, intermediate_size: 3072, num_hidden_layers: 12, num_attention_heads: 12, num_channels: 3, image_size: 224, hidden_act: candle_nn::Activation::GeluPytorchTanh, layer_norm_eps: 1e-6, }; Self { text_config, vision_config, } } } #[derive(Clone, Debug)] struct MultiheadAttention { q_proj: Linear, k_proj: Linear, v_proj: Linear, out_proj: Linear, num_heads: usize, } impl MultiheadAttention { fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> { let h = cfg.hidden_size; let num_heads = cfg.num_attention_heads; let w_in_proj = vb.get((3 * h, h), "in_proj_weight")?.chunk(3, 0)?; let b_in_proj = vb.get(3 * h, "in_proj_bias")?.chunk(3, 0)?; let q_proj = Linear::new(w_in_proj[0].clone(), Some(b_in_proj[0].clone())); let k_proj = Linear::new(w_in_proj[1].clone(), Some(b_in_proj[1].clone())); let v_proj = Linear::new(w_in_proj[2].clone(), Some(b_in_proj[2].clone())); let out_proj = linear(h, h, vb.pp("out_proj"))?; Ok(Self { q_proj, k_proj, v_proj, out_proj, num_heads, }) } fn separate_heads(&self, x: &Tensor) -> Result<Tensor> { let (b, n, c) = x.dims3()?; x.reshape((b, n, self.num_heads, c / self.num_heads))? .transpose(1, 2)? .contiguous() } fn recombine_heads(&self, x: &Tensor) -> Result<Tensor> { let (b, n_heads, n_tokens, c_per_head) = x.dims4()?; x.transpose(1, 2)? .reshape((b, n_tokens, n_heads * c_per_head)) } fn forward(&self, q: &Tensor, k: &Tensor, v: &Tensor) -> Result<Tensor> { let q = self.q_proj.forward(&q.contiguous()?)?; let k = self.k_proj.forward(&k.contiguous()?)?; let v = self.v_proj.forward(&v.contiguous()?)?; let q = self.separate_heads(&q)?; let k = self.separate_heads(&k)?; let v = self.separate_heads(&v)?; let (_, _, _, c_per_head) = q.dims4()?; let attn = (q.matmul(&k.t()?)? 
/ (c_per_head as f64).sqrt())?; let attn = candle_nn::ops::softmax_last_dim(&attn)?; let out = attn.matmul(&v)?; self.recombine_heads(&out)?.apply(&self.out_proj) } } #[derive(Debug, Clone)] struct MultiheadAttentionPoolingHead { probe: Tensor, attention: MultiheadAttention, layernorm: LayerNorm, mlp: Mlp, } impl MultiheadAttentionPoolingHead { fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> { let mlp = Mlp::new(cfg, vb.pp("mlp"))?; let layernorm = layer_norm(cfg.hidden_size, cfg.layer_norm_eps, vb.pp("layernorm"))?; let probe = vb.get((1, 1, cfg.hidden_size), "probe")?; let attention = MultiheadAttention::new(cfg, vb.pp("attention"))?; Ok(Self { probe, attention, layernorm, mlp, }) } } impl Module for MultiheadAttentionPoolingHead { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let batch_size = xs.dim(0)?; let probe = self.probe.repeat((batch_size, 1, 1))?; let xs = self.attention.forward(&probe, xs, xs)?; let residual = &xs; let xs = xs.apply(&self.layernorm)?.apply(&self.mlp)?; (xs + residual)?.i((.., 0)) } } #[derive(Debug, Clone)] struct Attention { q_proj: Linear, k_proj: Linear, v_proj: Linear, out_proj: Linear, num_heads: usize, head_dim: usize, scale: f64, } impl Attention { fn new<C: TransformerConfig>(cfg: &C, vb: VarBuilder) -> Result<Self> { let embed_dim = cfg.hidden_size(); let q_proj = linear(embed_dim, embed_dim, vb.pp("q_proj"))?; let k_proj = linear(embed_dim, embed_dim, vb.pp("k_proj"))?; let v_proj = linear(embed_dim, embed_dim, vb.pp("v_proj"))?; let out_proj = linear(embed_dim, embed_dim, vb.pp("out_proj"))?; let num_heads = cfg.num_attention_heads(); let head_dim = embed_dim / num_heads; Ok(Self { q_proj, k_proj, v_proj, out_proj, num_heads, head_dim, scale: (head_dim as f64).powf(-0.5), }) } fn forward(&self, xs: &Tensor, attention_mask: Option<&Tensor>) -> Result<Tensor> { let (batch_size, q_len, _) = xs.dims3()?; let query_states = xs.apply(&self.q_proj)?; let key_states = xs.apply(&self.k_proj)?; let value_states = xs.apply(&self.v_proj)?; let shape = (batch_size, q_len, self.num_heads, self.head_dim); let query_states = query_states.reshape(shape)?.transpose(1, 2)?.contiguous()?; let key_states = key_states.reshape(shape)?.transpose(1, 2)?.contiguous()?; let value_states = value_states.reshape(shape)?.transpose(1, 2)?.contiguous()?; let attn_weights = (query_states.matmul(&key_states.t()?)? * self.scale)?; let attn_weights = match attention_mask { None => attn_weights, Some(mask) => attn_weights.broadcast_add(mask)?, }; // The original implementation upcasts to f32 but candle_nn::ops::softmax should handle this properly. let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?; let attn_outputs = attn_weights .matmul(&value_states)? .transpose(1, 2)? .reshape((batch_size, q_len, ()))? 
.apply(&self.out_proj)?; Ok(attn_outputs) } } // https://github.com/huggingface/transformers/blob/2e24ee4dfa39cc0bc264b89edbccc373c8337086/src/transformers/models/siglip/modeling_siglip.py#L599 #[derive(Debug, Clone)] struct Mlp { fc1: Linear, fc2: Linear, activation_fn: candle_nn::Activation, } impl Mlp { fn new<C: TransformerConfig>(cfg: &C, vb: VarBuilder) -> Result<Self> { let hidden_size = cfg.hidden_size(); let intermediate_size = cfg.intermediate_size(); let fc1 = candle_nn::linear(hidden_size, intermediate_size, vb.pp("fc1"))?; let fc2 = candle_nn::linear(intermediate_size, hidden_size, vb.pp("fc2"))?; Ok(Self { fc1, fc2, activation_fn: cfg.hidden_act(), }) } } impl Module for Mlp { fn forward(&self, xs: &candle::Tensor) -> Result<candle::Tensor> { xs.apply(&self.fc1)? .apply(&self.activation_fn)? .apply(&self.fc2) } } // https://github.com/huggingface/transformers/blob/2e24ee4dfa39cc0bc264b89edbccc373c8337086/src/transformers/models/siglip/modeling_siglip.py#L614 #[derive(Debug, Clone)] struct EncoderLayer { self_attn: Attention, layer_norm1: LayerNorm, mlp: Mlp, layer_norm2: LayerNorm, } impl EncoderLayer { fn new<C: TransformerConfig>(cfg: &C, vb: VarBuilder) -> Result<Self> { let hidden_size = cfg.hidden_size(); let layer_norm_eps = cfg.layer_norm_eps(); let self_attn = Attention::new(cfg, vb.pp("self_attn"))?; let layer_norm1 = layer_norm(hidden_size, layer_norm_eps, vb.pp("layer_norm1"))?; let mlp = Mlp::new(cfg, vb.pp("mlp"))?; let layer_norm2 = layer_norm(hidden_size, layer_norm_eps, vb.pp("layer_norm2"))?; Ok(Self { self_attn, layer_norm1, mlp, layer_norm2, }) } fn forward(&self, xs: &Tensor, attention_mask: Option<&Tensor>) -> Result<Tensor> { let residual = xs; let xs = xs.apply(&self.layer_norm1)?; let xs = self.self_attn.forward(&xs, attention_mask)?; let xs = (residual + xs)?; let residual = &xs; let xs = xs.apply(&self.layer_norm2)?.apply(&self.mlp)?; let xs = (xs + residual)?; Ok(xs) } } #[derive(Debug, Clone)] struct Encoder { layers: Vec<EncoderLayer>, } impl Encoder { fn new<C: TransformerConfig>(cfg: &C, vb: VarBuilder) -> Result<Self> { let mut layers = vec![]; let vb = vb.pp("layers"); for layer_idx in 0..cfg.num_hidden_layers() { let layer = EncoderLayer::new(cfg, vb.pp(layer_idx))?; layers.push(layer) } Ok(Self { layers }) } fn forward(&self, xs: &Tensor, attention_mask: Option<&Tensor>) -> Result<Tensor> { let mut xs = xs.clone(); for layer in self.layers.iter() { xs = layer.forward(&xs, attention_mask)? 
} Ok(xs) } } #[derive(Debug, Clone)] struct VisionEmbeddings { patch_embedding: candle_nn::Conv2d, position_embedding: candle_nn::Embedding, position_ids: Tensor, } impl VisionEmbeddings { fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> { let conv2d_cfg = candle_nn::Conv2dConfig { stride: cfg.patch_size, ..Default::default() }; let patch_embedding = candle_nn::conv2d( cfg.num_channels, cfg.hidden_size, cfg.patch_size, conv2d_cfg, vb.pp("patch_embedding"), )?; let num_patches = (cfg.image_size / cfg.patch_size).pow(2); let position_ids = Tensor::arange(0, num_patches as i64, vb.device())?; let position_embedding = candle_nn::embedding(num_patches, cfg.hidden_size(), vb.pp("position_embedding"))?; Ok(Self { patch_embedding, position_embedding, position_ids, }) } } impl Module for VisionEmbeddings { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let (_batch, _channels, _height, _width) = xs.dims4()?; let embeddings = xs.apply(&self.patch_embedding)?; let embeddings = embeddings.flatten_from(2)?.transpose(1, 2)?; let position_embedding = self.position_embedding.forward(&self.position_ids)?; embeddings.broadcast_add(&position_embedding) } } #[derive(Debug, Clone)] struct VisionTransformer { embeddings: VisionEmbeddings, encoder: Encoder, post_layernorm: LayerNorm, head: Option<MultiheadAttentionPoolingHead>, } impl VisionTransformer { fn new(cfg: &VisionConfig, use_head: bool, vb: VarBuilder) -> Result<Self> { let embeddings = VisionEmbeddings::new(cfg, vb.pp("embeddings"))?; let encoder = Encoder::new(cfg, vb.pp("encoder"))?; let post_layernorm = layer_norm(cfg.hidden_size, cfg.layer_norm_eps, vb.pp("post_layernorm"))?; let head = if use_head { Some(MultiheadAttentionPoolingHead::new(cfg, vb.pp("head"))?) } else { None }; Ok(Self { embeddings, encoder, post_layernorm, head, }) } } impl Module for VisionTransformer { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let xs = xs.apply(&self.embeddings)?; let xs = self.encoder.forward(&xs, None)?; let xs = xs.apply(&self.post_layernorm)?; match self.head.as_ref() { None => Ok(xs), Some(h) => xs.apply(h), } } } #[derive(Debug, Clone)] pub struct VisionModel { vision_model: VisionTransformer, } impl VisionModel { pub fn new(cfg: &VisionConfig, use_head: bool, vb: VarBuilder) -> Result<Self> { let vision_model = VisionTransformer::new(cfg, use_head, vb)?; Ok(Self { vision_model }) } } impl Module for VisionModel { fn forward(&self, xs: &Tensor) -> Result<Tensor> { xs.apply(&self.vision_model) } } #[derive(Debug, Clone)] struct TextEmbeddings { token_embedding: candle_nn::Embedding, position_embedding: candle_nn::Embedding, position_ids: Tensor, } impl TextEmbeddings { fn new(cfg: &TextConfig, vb: VarBuilder) -> Result<Self> { let token_embedding = candle_nn::embedding(cfg.vocab_size, cfg.hidden_size, vb.pp("token_embedding"))?; let position_embedding = candle_nn::embedding( cfg.max_position_embeddings, cfg.hidden_size, vb.pp("position_embedding"), )?; let position_ids = Tensor::arange(0u32, cfg.max_position_embeddings as u32, vb.device())?.unsqueeze(0)?; Ok(Self { token_embedding, position_embedding, position_ids, }) } } impl Module for TextEmbeddings { fn forward(&self, input_ids: &Tensor) -> Result<Tensor> { let seq_length = input_ids.dim(D::Minus1)?; let inputs_embeds = self.token_embedding.forward(input_ids)?; let position_ids = self.position_ids.narrow(1, 0, seq_length)?; let position_embedding = self.position_embedding.forward(&position_ids)?; inputs_embeds.broadcast_add(&position_embedding) } } #[derive(Debug, Clone)] pub 
struct TextTransformer { embeddings: TextEmbeddings, encoder: Encoder, final_layer_norm: LayerNorm, pub head: Linear, } impl TextTransformer { fn new(cfg: &TextConfig, vb: VarBuilder) -> Result<Self> { let embeddings = TextEmbeddings::new(cfg, vb.pp("embeddings"))?; let encoder = Encoder::new(cfg, vb.pp("encoder"))?; let final_layer_norm = layer_norm( cfg.hidden_size, cfg.layer_norm_eps, vb.pp("final_layer_norm"), )?; let head = linear(cfg.hidden_size, cfg.hidden_size, vb.pp("head"))?; Ok(Self { embeddings, encoder, final_layer_norm, head, }) } } impl Module for TextTransformer { fn forward(&self, input_ids: &Tensor) -> Result<Tensor> { let (_bsz, seq_len) = input_ids.dims2()?; let input_ids = self.embeddings.forward(input_ids)?; let input_ids = self.encoder.forward(&input_ids, None)?; let last_hidden_state = self.final_layer_norm.forward(&input_ids)?; last_hidden_state .i((.., seq_len - 1, ..))? .contiguous()? .apply(&self.head) } } #[derive(Debug, Clone)] pub struct TextModel { pub text_model: TextTransformer, } impl TextModel { pub fn new(cfg: &TextConfig, vb: VarBuilder) -> Result<Self> { let text_model = TextTransformer::new(cfg, vb)?; Ok(Self { text_model }) } } impl Module for TextModel { fn forward(&self, xs: &Tensor) -> Result<Tensor> { xs.apply(&self.text_model) } } #[derive(Clone, Debug)] pub struct Model { text_model: TextModel, vision_model: VisionModel, logit_bias: Tensor, logit_scale: Tensor, } impl Model { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let text_model = TextModel::new(&cfg.text_config, vb.pp("text_model"))?; let vision_model = VisionModel::new(&cfg.vision_config, true, vb.pp("vision_model"))?; let logit_scale = vb.get(&[1], "logit_scale")?; let logit_bias = vb.get(&[1], "logit_bias")?; Ok(Self { text_model, vision_model, logit_bias, logit_scale, }) } pub fn get_text_features(&self, input_ids: &Tensor) -> Result<Tensor> { input_ids.apply(&self.text_model) } pub fn get_image_features(&self, pixel_values: &Tensor) -> Result<Tensor> { pixel_values.apply(&self.vision_model) } pub fn forward(&self, pixel_values: &Tensor, input_ids: &Tensor) -> Result<(Tensor, Tensor)> { let image_features = self.get_image_features(pixel_values)?; let text_features = self.get_text_features(input_ids)?; let image_features_normalized = div_l2_norm(&image_features)?; let text_features_normalized = div_l2_norm(&text_features)?; let logits_per_text = text_features_normalized.matmul(&image_features_normalized.t()?)?; let logit_scale = self.logit_scale.exp()?; let logits_per_text = logits_per_text .broadcast_mul(&logit_scale)? .broadcast_add(&self.logit_bias)?; let logits_per_image = logits_per_text.t()?; Ok((logits_per_text, logits_per_image)) } }
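A short usage sketch of the model above, added for reference and not taken from the repository: the safetensors path and the all-zero pixel/token tensors are placeholders; real inputs come from the image processor and the SigLIP tokenizer padded to `max_position_embeddings` (64 for the base config).

// Hedged sketch: zero-shot scoring with the SigLIP model defined above.
use candle::{DType, Device, Tensor};
use candle_nn::VarBuilder;
use candle_transformers::models::siglip;

fn siglip_sketch(weights: &std::path::Path) -> anyhow::Result<()> {
    let device = Device::Cpu;
    let cfg = siglip::Config::base_patch16_224();
    let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[weights], DType::F32, &device)? };
    let model = siglip::Model::new(&cfg, vb)?;

    // Dummy batch: one 224x224 RGB image and one text prompt padded to 64 tokens.
    let pixel_values = Tensor::zeros((1, 3, 224, 224), DType::F32, &device)?;
    let input_ids = Tensor::zeros((1, 64), DType::U32, &device)?;

    let (_logits_per_text, logits_per_image) = model.forward(&pixel_values, &input_ids)?;
    // SigLIP is trained with a sigmoid loss, so pairwise probabilities come from a
    // sigmoid rather than a softmax over the candidate texts.
    let probs = candle_nn::ops::sigmoid(&logits_per_image)?;
    println!("{probs}");
    Ok(())
}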
candle/candle-transformers/src/models/siglip.rs/0
{ "file_path": "candle/candle-transformers/src/models/siglip.rs", "repo_id": "candle", "token_count": 10577 }
//! StarCoder2 model implementation.
//!
//! StarCoder2 is a large language model optimized for code generation.
//! This implementation follows the `starcoder2` configuration used by the
//! Hugging Face checkpoints.
//!
//! Key characteristics:
//! - Causal self-attention mechanism
//! - Grouped-query attention (multi-query attention as a special case)
//! - LayerNorm for normalization
//! - Rotary position embeddings (RoPE)
//! - Optional sliding-window attention
//!
//! References:
//! - 📝 [StarCoder Paper](https://arxiv.org/abs/2305.06161)
//! - 🤗 [StarCoder Model Card](https://huggingface.co/bigcode/starcoder)
//!
use candle::{DType, Device, Module, Result, Tensor, D};
use candle_nn::{layer_norm, linear_b, LayerNorm, Linear, VarBuilder};
use std::sync::Arc;

#[derive(Debug, Clone, serde::Deserialize)]
pub struct Config {
    vocab_size: usize,
    hidden_size: usize,
    intermediate_size: usize,
    num_hidden_layers: usize,
    num_attention_heads: usize,
    num_key_value_heads: usize,
    hidden_act: candle_nn::Activation,
    max_position_embeddings: usize,
    norm_epsilon: f64,
    rope_theta: f64,
    use_bias: bool,
    sliding_window: Option<usize>,
}

#[derive(Debug, Clone)]
struct RotaryEmbedding {
    sin: Tensor,
    cos: Tensor,
}

fn rotate_half(xs: &Tensor) -> Result<Tensor> {
    let last_dim = xs.dim(D::Minus1)?;
    let xs1 = xs.narrow(D::Minus1, 0, last_dim / 2)?;
    let xs2 = xs.narrow(D::Minus1, last_dim / 2, last_dim - last_dim / 2)?;
    Tensor::cat(&[&xs2.neg()?, &xs1], D::Minus1)
}

impl RotaryEmbedding {
    fn new(dtype: DType, cfg: &Config, dev: &Device) -> Result<Self> {
        let dim = cfg.hidden_size / cfg.num_attention_heads;
        let max_seq_len = cfg.max_position_embeddings;
        let inv_freq: Vec<_> = (0..dim)
            .step_by(2)
            .map(|i| 1f32 / cfg.rope_theta.powf(i as f64 / dim as f64) as f32)
            .collect();
        let inv_freq_len = inv_freq.len();
        let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(dtype)?;
        let t = Tensor::arange(0u32, max_seq_len as u32, dev)?
            .to_dtype(dtype)?
            .reshape((max_seq_len, 1))?;
        let freqs = t.matmul(&inv_freq)?;
        let freqs = Tensor::cat(&[&freqs, &freqs], D::Minus1)?;
        Ok(Self {
            sin: freqs.sin()?,
            cos: freqs.cos()?,
        })
    }

    fn apply_rotary_emb_qkv(
        &self,
        q: &Tensor,
        k: &Tensor,
        seqlen_offset: usize,
    ) -> Result<(Tensor, Tensor)> {
        let (_b_sz, _h, seq_len, _n_embd) = q.dims4()?;
        let cos = self.cos.narrow(0, seqlen_offset, seq_len)?;
        let sin = self.sin.narrow(0, seqlen_offset, seq_len)?;
        let cos = cos.unsqueeze(0)?.unsqueeze(0)?; // (1, 1, seq_len, dim)
        let sin = sin.unsqueeze(0)?.unsqueeze(0)?; // (1, 1, seq_len, dim)
        let q_embed = (q.broadcast_mul(&cos)? + rotate_half(q)?.broadcast_mul(&sin))?;
        let k_embed = (k.broadcast_mul(&cos)?
+ rotate_half(k)?.broadcast_mul(&sin))?; Ok((q_embed, k_embed)) } } #[derive(Debug, Clone)] #[allow(clippy::upper_case_acronyms)] struct MLP { c_fc: Linear, c_proj: Linear, act: candle_nn::Activation, } impl MLP { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let (h_size, i_size) = (cfg.hidden_size, cfg.intermediate_size); let c_fc = linear_b(h_size, i_size, cfg.use_bias, vb.pp("c_fc"))?; let c_proj = linear_b(i_size, h_size, cfg.use_bias, vb.pp("c_proj"))?; Ok(Self { c_fc, c_proj, act: cfg.hidden_act, }) } } impl Module for MLP { fn forward(&self, xs: &Tensor) -> Result<Tensor> { xs.apply(&self.c_fc)?.apply(&self.act)?.apply(&self.c_proj) } } #[derive(Debug, Clone)] struct Attention { q_proj: Linear, k_proj: Linear, v_proj: Linear, o_proj: Linear, num_heads: usize, num_kv_heads: usize, num_kv_groups: usize, head_dim: usize, hidden_size: usize, rotary_emb: Arc<RotaryEmbedding>, kv_cache: Option<(Tensor, Tensor)>, } impl Attention { fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> { let hidden_sz = cfg.hidden_size; let num_heads = cfg.num_attention_heads; let num_kv_heads = cfg.num_key_value_heads; let num_kv_groups = num_heads / num_kv_heads; let head_dim = hidden_sz / num_heads; let b = cfg.use_bias; let q_proj = linear_b(hidden_sz, num_heads * head_dim, b, vb.pp("q_proj"))?; let k_proj = linear_b(hidden_sz, num_kv_heads * head_dim, b, vb.pp("k_proj"))?; let v_proj = linear_b(hidden_sz, num_kv_heads * head_dim, b, vb.pp("v_proj"))?; let o_proj = linear_b(num_heads * head_dim, hidden_sz, b, vb.pp("o_proj"))?; Ok(Self { q_proj, k_proj, v_proj, o_proj, num_heads, num_kv_heads, num_kv_groups, head_dim, hidden_size: hidden_sz, rotary_emb, kv_cache: None, }) } fn forward( &mut self, xs: &Tensor, attention_mask: Option<&Tensor>, seqlen_offset: usize, ) -> Result<Tensor> { let (b_sz, q_len, _) = xs.dims3()?; let query_states = self.q_proj.forward(xs)?; let key_states = self.k_proj.forward(xs)?; let value_states = self.v_proj.forward(xs)?; let query_states = query_states .reshape((b_sz, q_len, self.num_heads, self.head_dim))? .transpose(1, 2)?; let key_states = key_states .reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))? .transpose(1, 2)?; let value_states = value_states .reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))? .transpose(1, 2)?; let (query_states, key_states) = self.rotary_emb .apply_rotary_emb_qkv(&query_states, &key_states, seqlen_offset)?; let (key_states, value_states) = match &self.kv_cache { None => (key_states, value_states), Some((prev_k, prev_v)) => { let key_states = Tensor::cat(&[prev_k, &key_states], 2)?; let value_states = Tensor::cat(&[prev_v, &value_states], 2)?; (key_states, value_states) } }; self.kv_cache = Some((key_states.clone(), value_states.clone())); let key_states = crate::utils::repeat_kv(key_states, self.num_kv_groups)?; let value_states = crate::utils::repeat_kv(value_states, self.num_kv_groups)?; let scale = 1f64 / f64::sqrt(self.head_dim as f64); let attn_weights = (query_states.matmul(&key_states.transpose(2, 3)?)? * scale)?; let attn_weights = match attention_mask { None => attn_weights, Some(mask) => attn_weights.broadcast_add(mask)?, }; let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?; let attn_output = attn_weights.matmul(&value_states)?; attn_output .transpose(1, 2)? .reshape((b_sz, q_len, self.hidden_size))? 
.apply(&self.o_proj) } fn clear_kv_cache(&mut self) { self.kv_cache = None } } #[derive(Debug, Clone)] struct DecoderLayer { self_attn: Attention, mlp: MLP, input_layernorm: LayerNorm, post_attention_layernorm: LayerNorm, } impl DecoderLayer { fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> { let self_attn = Attention::new(rotary_emb, cfg, vb.pp("self_attn"))?; let mlp = MLP::new(cfg, vb.pp("mlp"))?; let input_layernorm = layer_norm(cfg.hidden_size, cfg.norm_epsilon, vb.pp("input_layernorm"))?; let post_attention_layernorm = layer_norm( cfg.hidden_size, cfg.norm_epsilon, vb.pp("post_attention_layernorm"), )?; Ok(Self { self_attn, mlp, input_layernorm, post_attention_layernorm, }) } fn forward( &mut self, xs: &Tensor, attention_mask: Option<&Tensor>, seqlen_offset: usize, ) -> Result<Tensor> { let residual = xs; let xs = self.input_layernorm.forward(xs)?; let xs = self.self_attn.forward(&xs, attention_mask, seqlen_offset)?; let xs = (xs + residual)?; let residual = &xs; let xs = xs.apply(&self.post_attention_layernorm)?.apply(&self.mlp)?; residual + xs } fn clear_kv_cache(&mut self) { self.self_attn.clear_kv_cache() } } #[derive(Debug, Clone)] pub struct Model { embed_tokens: candle_nn::Embedding, layers: Vec<DecoderLayer>, norm: LayerNorm, lm_head: Linear, sliding_window: Option<usize>, device: Device, dtype: DType, } impl Model { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let vb_m = vb.pp("model"); let embed_tokens = candle_nn::embedding(cfg.vocab_size, cfg.hidden_size, vb_m.pp("embed_tokens"))?; let rotary_emb = Arc::new(RotaryEmbedding::new(vb.dtype(), cfg, vb_m.device())?); let mut layers = Vec::with_capacity(cfg.num_hidden_layers); let vb_l = vb_m.pp("layers"); for layer_idx in 0..cfg.num_hidden_layers { let layer = DecoderLayer::new(rotary_emb.clone(), cfg, vb_l.pp(layer_idx))?; layers.push(layer) } let norm = layer_norm(cfg.hidden_size, cfg.norm_epsilon, vb_m.pp("norm"))?; let lm_head = candle_nn::Linear::new(embed_tokens.embeddings().clone(), None); Ok(Self { embed_tokens, layers, norm, lm_head, sliding_window: cfg.sliding_window, device: vb.device().clone(), dtype: vb.dtype(), }) } fn prepare_decoder_attention_mask( &self, b_size: usize, tgt_len: usize, seqlen_offset: usize, ) -> Result<Tensor> { let sliding_window = self.sliding_window.unwrap_or(tgt_len + 42); let mask: Vec<_> = (0..tgt_len) .flat_map(|i| { (0..tgt_len).map(move |j| { if i < j || j + sliding_window < i { f32::NEG_INFINITY } else { 0. } }) }) .collect(); let mask = Tensor::from_slice(&mask, (tgt_len, tgt_len), &self.device)?; let mask = if seqlen_offset > 0 { let mask0 = Tensor::zeros((tgt_len, seqlen_offset), DType::F32, &self.device)?; Tensor::cat(&[&mask0, &mask], D::Minus1)? } else { mask }; mask.expand((b_size, 1, tgt_len, tgt_len + seqlen_offset))? .to_dtype(self.dtype) } pub fn forward(&mut self, input_ids: &Tensor, seqlen_offset: usize) -> Result<Tensor> { let (b_size, seq_len) = input_ids.dims2()?; let attention_mask = if seq_len <= 1 { None } else { let mask = self.prepare_decoder_attention_mask(b_size, seq_len, seqlen_offset)?; Some(mask) }; let mut xs = self.embed_tokens.forward(input_ids)?; for layer in self.layers.iter_mut() { xs = layer.forward(&xs, attention_mask.as_ref(), seqlen_offset)? } xs.narrow(1, seq_len - 1, 1)? .apply(&self.norm)? .apply(&self.lm_head) } pub fn clear_kv_cache(&mut self) { for layer in self.layers.iter_mut() { layer.clear_kv_cache() } } }
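A usage sketch of the model above for reference, not taken from the repository: the weight path, `config.json` string, prompt ids and the `anyhow`/`serde_json` plumbing are placeholders; it shows how `seqlen_offset` advances together with the KV cache during greedy generation.

// Hedged sketch: greedy generation with the StarCoder2 model defined above.
use candle::{DType, Device, Tensor};
use candle_nn::VarBuilder;
use candle_transformers::models::starcoder2;

fn starcoder2_sketch(weights: &std::path::Path, config_json: &str) -> anyhow::Result<Vec<u32>> {
    let device = Device::Cpu;
    let cfg: starcoder2::Config = serde_json::from_str(config_json)?;
    let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[weights], DType::F32, &device)? };
    let mut model = starcoder2::Model::new(&cfg, vb)?;

    // Placeholder prompt ids; in practice these come from the tokenizer.
    let mut tokens: Vec<u32> = vec![1, 2, 3, 4];
    let mut offset = 0usize;
    for _ in 0..32 {
        // The first pass feeds the whole prompt, later passes only the newest token;
        // `offset` tells the rotary embeddings and the mask how much is already cached.
        let ctx = if offset == 0 { &tokens[..] } else { &tokens[tokens.len() - 1..] };
        let input = Tensor::new(ctx, &device)?.unsqueeze(0)?;
        let logits = model.forward(&input, offset)?; // (1, 1, vocab_size)
        offset += ctx.len();
        let next = logits.squeeze(0)?.squeeze(0)?.argmax(0)?.to_scalar::<u32>()?;
        tokens.push(next);
    }
    model.clear_kv_cache();
    Ok(tokens)
}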
candle/candle-transformers/src/models/starcoder2.rs/0
{ "file_path": "candle/candle-transformers/src/models/starcoder2.rs", "repo_id": "candle", "token_count": 6059 }
use super::common::LayerNormNoWeights; use candle::{Module, Result, Tensor}; use candle_nn::VarBuilder; #[derive(Debug)] pub struct MixingResidualBlock { norm1: LayerNormNoWeights, depthwise_conv: candle_nn::Conv2d, norm2: LayerNormNoWeights, channelwise_lin1: candle_nn::Linear, channelwise_lin2: candle_nn::Linear, gammas: Vec<f32>, } impl MixingResidualBlock { pub fn new(inp: usize, embed_dim: usize, vb: VarBuilder) -> Result<Self> { let norm1 = LayerNormNoWeights::new(inp)?; let norm2 = LayerNormNoWeights::new(inp)?; let cfg = candle_nn::Conv2dConfig { groups: inp, ..Default::default() }; let depthwise_conv = candle_nn::conv2d(inp, inp, 3, cfg, vb.pp("depthwise.1"))?; let channelwise_lin1 = candle_nn::linear(inp, embed_dim, vb.pp("channelwise.0"))?; let channelwise_lin2 = candle_nn::linear(embed_dim, inp, vb.pp("channelwise.2"))?; let gammas = vb.get(6, "gammas")?.to_vec1::<f32>()?; Ok(Self { norm1, depthwise_conv, norm2, channelwise_lin1, channelwise_lin2, gammas, }) } } impl Module for MixingResidualBlock { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let mods = &self.gammas; let x_temp = xs .permute((0, 2, 3, 1))? .apply(&self.norm1)? .permute((0, 3, 1, 2))? .affine(1. + mods[0] as f64, mods[1] as f64)?; let x_temp = candle_nn::ops::replication_pad2d(&x_temp, 1)?; let xs = (xs + x_temp.apply(&self.depthwise_conv)? * mods[2] as f64)?; let x_temp = xs .permute((0, 2, 3, 1))? .apply(&self.norm2)? .permute((0, 3, 1, 2))? .affine(1. + mods[3] as f64, mods[4] as f64)?; let x_temp = x_temp .permute((0, 2, 3, 1))? .contiguous()? .apply(&self.channelwise_lin1)? .gelu()? .apply(&self.channelwise_lin2)? .permute((0, 3, 1, 2))?; xs + x_temp * mods[5] as f64 } } #[derive(Debug)] pub struct PaellaVQ { in_block_conv: candle_nn::Conv2d, out_block_conv: candle_nn::Conv2d, down_blocks: Vec<(Option<candle_nn::Conv2d>, MixingResidualBlock)>, down_blocks_conv: candle_nn::Conv2d, down_blocks_bn: candle_nn::BatchNorm, up_blocks_conv: candle_nn::Conv2d, up_blocks: Vec<(Vec<MixingResidualBlock>, Option<candle_nn::ConvTranspose2d>)>, } impl PaellaVQ { pub fn new(vb: VarBuilder) -> Result<Self> { const IN_CHANNELS: usize = 3; const OUT_CHANNELS: usize = 3; const LATENT_CHANNELS: usize = 4; const EMBED_DIM: usize = 384; const BOTTLENECK_BLOCKS: usize = 12; const C_LEVELS: [usize; 2] = [EMBED_DIM / 2, EMBED_DIM]; let in_block_conv = candle_nn::conv2d( IN_CHANNELS * 4, C_LEVELS[0], 1, Default::default(), vb.pp("in_block.1"), )?; let out_block_conv = candle_nn::conv2d( C_LEVELS[0], OUT_CHANNELS * 4, 1, Default::default(), vb.pp("out_block.0"), )?; let mut down_blocks = Vec::new(); let vb_d = vb.pp("down_blocks"); let mut d_idx = 0; for (i, &c_level) in C_LEVELS.iter().enumerate() { let conv_block = if i > 0 { let cfg = candle_nn::Conv2dConfig { padding: 1, stride: 2, ..Default::default() }; let block = candle_nn::conv2d(C_LEVELS[i - 1], c_level, 4, cfg, vb_d.pp(d_idx))?; d_idx += 1; Some(block) } else { None }; let res_block = MixingResidualBlock::new(c_level, c_level * 4, vb_d.pp(d_idx))?; d_idx += 1; down_blocks.push((conv_block, res_block)) } let vb_d = vb_d.pp(d_idx); let down_blocks_conv = candle_nn::conv2d_no_bias( C_LEVELS[1], LATENT_CHANNELS, 1, Default::default(), vb_d.pp(0), )?; let down_blocks_bn = candle_nn::batch_norm(LATENT_CHANNELS, 1e-5, vb_d.pp(1))?; let mut up_blocks = Vec::new(); let vb_u = vb.pp("up_blocks"); let mut u_idx = 0; let up_blocks_conv = candle_nn::conv2d( LATENT_CHANNELS, C_LEVELS[1], 1, Default::default(), vb_u.pp(u_idx).pp(0), )?; u_idx += 1; for (i, &c_level) in 
C_LEVELS.iter().rev().enumerate() { let mut res_blocks = Vec::new(); let n_bottleneck_blocks = if i == 0 { BOTTLENECK_BLOCKS } else { 1 }; for _j in 0..n_bottleneck_blocks { let res_block = MixingResidualBlock::new(c_level, c_level * 4, vb_u.pp(u_idx))?; u_idx += 1; res_blocks.push(res_block) } let conv_block = if i < C_LEVELS.len() - 1 { let cfg = candle_nn::ConvTranspose2dConfig { padding: 1, stride: 2, ..Default::default() }; let block = candle_nn::conv_transpose2d( c_level, C_LEVELS[C_LEVELS.len() - i - 2], 4, cfg, vb_u.pp(u_idx), )?; u_idx += 1; Some(block) } else { None }; up_blocks.push((res_blocks, conv_block)) } Ok(Self { in_block_conv, down_blocks, down_blocks_conv, down_blocks_bn, up_blocks, up_blocks_conv, out_block_conv, }) } pub fn encode(&self, xs: &Tensor) -> Result<Tensor> { let mut xs = candle_nn::ops::pixel_unshuffle(xs, 2)?.apply(&self.in_block_conv)?; for down_block in self.down_blocks.iter() { if let Some(conv) = &down_block.0 { xs = xs.apply(conv)? } xs = xs.apply(&down_block.1)? } xs.apply(&self.down_blocks_conv)? .apply_t(&self.down_blocks_bn, false) } pub fn decode(&self, xs: &Tensor) -> Result<Tensor> { // TODO: quantizer if we want to support `force_not_quantize=False`. let mut xs = xs.apply(&self.up_blocks_conv)?; for up_block in self.up_blocks.iter() { for b in up_block.0.iter() { xs = xs.apply(b)?; } if let Some(conv) = &up_block.1 { xs = xs.apply(conv)? } } xs.apply(&self.out_block_conv)? .apply(&|xs: &_| candle_nn::ops::pixel_shuffle(xs, 2)) } } impl Module for PaellaVQ { fn forward(&self, xs: &Tensor) -> Result<Tensor> { self.decode(&self.encode(xs)?) } }
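For reference, a round-trip sketch through the autoencoder above, not taken from the repository: the weight path is a placeholder and the `VarBuilder` root is assumed to match the checkpoint layout.

// Hedged sketch: encode/decode round trip with the PaellaVQ model defined above.
use candle::{DType, Device, Tensor};
use candle_nn::VarBuilder;
use candle_transformers::models::wuerstchen::paella_vq::PaellaVQ;

fn paella_vq_sketch(weights: &std::path::Path) -> anyhow::Result<()> {
    let device = Device::Cpu;
    let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[weights], DType::F32, &device)? };
    let vq = PaellaVQ::new(vb)?;

    // Inputs are pixel-unshuffled by 2 and then downsampled once more by a stride-2
    // conv, so a 256x256 image yields a 4-channel 64x64 latent.
    let images = Tensor::zeros((1, 3, 256, 256), DType::F32, &device)?;
    let latents = vq.encode(&images)?;
    let reconstruction = vq.decode(&latents)?.clamp(0.0, 1.0)?;
    println!("{:?} -> {:?}", latents.shape(), reconstruction.shape());
    Ok(())
}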
candle/candle-transformers/src/models/wuerstchen/paella_vq.rs/0
{ "file_path": "candle/candle-transformers/src/models/wuerstchen/paella_vq.rs", "repo_id": "candle", "token_count": 4078 }
<html> <head> <meta content="text/html;charset=utf-8" http-equiv="Content-Type" /> <title>Candle Bert</title> </head> <body></body> </html> <!DOCTYPE html> <html> <head> <meta charset="UTF-8" /> <meta name="viewport" content="width=device-width, initial-scale=1.0" /> <style> @import url("https://fonts.googleapis.com/css2?family=Source+Code+Pro:wght@200;300;400&family=Source+Sans+3:wght@100;200;300;400;500;600;700;800;900&display=swap"); html, body { font-family: "Source Sans 3", sans-serif; } </style> <script src="https://cdn.tailwindcss.com"></script> <script type="module" src="./code.js"></script> <script type="module"> import { hcl } from "https://cdn.skypack.dev/d3-color@3"; import { interpolateReds } from "https://cdn.skypack.dev/d3-scale-chromatic@3"; import { scaleLinear } from "https://cdn.skypack.dev/d3-scale@4"; import { getModelInfo, getEmbeddings, getWikiText, cosineSimilarity, } from "./utils.js"; const bertWorker = new Worker("./bertWorker.js", { type: "module", }); const inputContainerEL = document.querySelector("#input-container"); const textAreaEl = document.querySelector("#input-area"); const outputAreaEl = document.querySelector("#output-area"); const formEl = document.querySelector("#form"); const searchInputEl = document.querySelector("#search-input"); const formWikiEl = document.querySelector("#form-wiki"); const searchWikiEl = document.querySelector("#search-wiki"); const outputStatusEl = document.querySelector("#output-status"); const modelSelectEl = document.querySelector("#model"); const sentencesRegex = /(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<![A-Z]\.)(?<=\.|\?)\s/gm; let sentenceEmbeddings = []; let currInputText = ""; let isCalculating = false; function toggleTextArea(state) { if (state) { textAreaEl.hidden = false; textAreaEl.focus(); } else { textAreaEl.hidden = true; } } inputContainerEL.addEventListener("focus", (e) => { toggleTextArea(true); }); textAreaEl.addEventListener("blur", (e) => { toggleTextArea(false); }); textAreaEl.addEventListener("focusout", (e) => { toggleTextArea(false); if (currInputText === textAreaEl.value || isCalculating) return; populateOutputArea(textAreaEl.value); calculateEmbeddings(textAreaEl.value); }); modelSelectEl.addEventListener("change", (e) => { if (currInputText === "" || isCalculating) return; populateOutputArea(textAreaEl.value); calculateEmbeddings(textAreaEl.value); }); function populateOutputArea(text) { currInputText = text; const sentences = text.split(sentencesRegex); outputAreaEl.innerHTML = ""; for (const [id, sentence] of sentences.entries()) { const sentenceEl = document.createElement("span"); sentenceEl.id = `sentence-${id}`; sentenceEl.innerText = sentence + " "; outputAreaEl.appendChild(sentenceEl); } } formEl.addEventListener("submit", async (e) => { e.preventDefault(); if (isCalculating || currInputText === "") return; toggleInputs(true); const modelID = modelSelectEl.value; const { modelURL, tokenizerURL, configURL, search_prefix } = getModelInfo(modelID); const text = searchInputEl.value; const query = search_prefix + searchInputEl.value; outputStatusEl.classList.remove("invisible"); outputStatusEl.innerText = "Calculating embeddings for query..."; isCalculating = true; const out = await getEmbeddings( bertWorker, modelURL, tokenizerURL, configURL, modelID, [query] ); outputStatusEl.classList.add("invisible"); const queryEmbeddings = out.output[0]; // calculate cosine similarity with all sentences given the query const distances = sentenceEmbeddings .map((embedding, id) => ({ id, similarity: 
cosineSimilarity(queryEmbeddings, embedding), })) .sort((a, b) => b.similarity - a.similarity) // getting top 10 most similar sentences .slice(0, 10); const colorScale = scaleLinear() .domain([ distances[distances.length - 1].similarity, distances[0].similarity, ]) .range([0, 1]) .interpolate(() => interpolateReds); outputAreaEl.querySelectorAll("span").forEach((el) => { el.style.color = "unset"; el.style.backgroundColor = "unset"; }); distances.forEach((d) => { const el = outputAreaEl.querySelector(`#sentence-${d.id}`); const color = colorScale(d.similarity); const fontColor = hcl(color).l < 70 ? "white" : "black"; el.style.color = fontColor; el.style.backgroundColor = color; }); outputAreaEl .querySelector(`#sentence-${distances[0].id}`) .scrollIntoView({ behavior: "smooth", block: "center", inline: "nearest", }); isCalculating = false; toggleInputs(false); }); async function calculateEmbeddings(text) { isCalculating = true; toggleInputs(true); const modelID = modelSelectEl.value; const { modelURL, tokenizerURL, configURL, document_prefix } = getModelInfo(modelID); const sentences = text.split(sentencesRegex); const allEmbeddings = []; outputStatusEl.classList.remove("invisible"); for (const [id, sentence] of sentences.entries()) { const query = document_prefix + sentence; outputStatusEl.innerText = `Calculating embeddings: sentence ${ id + 1 } of ${sentences.length}`; const embeddings = await getEmbeddings( bertWorker, modelURL, tokenizerURL, configURL, modelID, [query], updateStatus ); allEmbeddings.push(embeddings); } outputStatusEl.classList.add("invisible"); sentenceEmbeddings = allEmbeddings.map((e) => e.output[0]); isCalculating = false; toggleInputs(false); } function updateStatus(data) { if ("status" in data) { if (data.status === "loading") { outputStatusEl.innerText = data.message; outputStatusEl.classList.remove("invisible"); } } } function toggleInputs(state) { const interactive = document.querySelectorAll(".interactive"); interactive.forEach((el) => { if (state) { el.disabled = true; } else { el.disabled = false; } }); } searchWikiEl.addEventListener("input", () => { searchWikiEl.setCustomValidity(""); }); formWikiEl.addEventListener("submit", async (e) => { e.preventDefault(); if ("example" in e.submitter.dataset) { searchWikiEl.value = e.submitter.innerText; } const text = searchWikiEl.value; if (isCalculating || text === "") return; try { const wikiText = await getWikiText(text); searchWikiEl.setCustomValidity(""); textAreaEl.innerHTML = wikiText; populateOutputArea(wikiText); calculateEmbeddings(wikiText); searchWikiEl.value = ""; } catch { searchWikiEl.setCustomValidity("Invalid Wikipedia article name"); searchWikiEl.reportValidity(); } }); </script> </head> <body class="container max-w-4xl mx-auto p-4"> <main class="grid grid-cols-1 gap-5 relative"> <span class="absolute text-5xl -ml-[1em]"> 🕯️ </span> <div> <h1 class="text-5xl font-bold">Candle BERT</h1> <h2 class="text-2xl font-bold">Rust/WASM Demo</h2> <p class="max-w-lg"> Running sentence embeddings and similarity search in the browser using the Bert Model written with <a href="https://github.com/huggingface/candle/" target="_blank" class="underline hover:text-blue-500 hover:no-underline" >Candle </a> and compiled to Wasm. 
Embeddings models from are from <a href="https://huggingface.co/sentence-transformers/" target="_blank" class="underline hover:text-blue-500 hover:no-underline" > Sentence Transformers </a> and <a href="https://huggingface.co/intfloat/" target="_blank" class="underline hover:text-blue-500 hover:no-underline" > Liang Wang - e5 Models </a> </p> </div> <div> <label for="model" class="font-medium block">Models Options: </label> <select id="model" class="border-2 border-gray-500 rounded-md font-light interactive disabled:cursor-not-allowed w-full max-w-max" > <option value="intfloat_e5_small_v2" selected> intfloat/e5-small-v2 (133 MB) </option> <option value="intfloat_e5_base_v2"> intfloat/e5-base-v2 (438 MB) </option> <option value="intfloat_multilingual_e5_small"> intfloat/multilingual-e5-small (471 MB) </option> <option value="sentence_transformers_all_MiniLM_L6_v2"> sentence-transformers/all-MiniLM-L6-v2 (90.9 MB) </option> <option value="sentence_transformers_all_MiniLM_L12_v2"> sentence-transformers/all-MiniLM-L12-v2 (133 MB) </option> </select> </div> <div> <h3 class="font-medium">Examples:</h3> <form id="form-wiki" class="flex text-xs rounded-md justify-between w-min gap-3" > <input type="submit" hidden /> <button data-example class="disabled:cursor-not-allowed interactive"> Pizza </button> <button data-example class="disabled:cursor-not-allowed interactive"> Paris </button> <button data-example class="disabled:cursor-not-allowed interactive"> Physics </button> <input type="text" id="search-wiki" title="Search Wikipedia article by title" class="font-light py-0 mx-1 resize-none outline-none w-32 disabled:cursor-not-allowed interactive" placeholder="Load Wikipedia article..." /> <button title="Search Wikipedia article and load into input" class="bg-gray-700 hover:bg-gray-800 text-white font-normal px-2 py-1 rounded disabled:bg-gray-300 disabled:cursor-not-allowed interactive" > Load </button> </form> </div> <form id="form" class="flex text-normal px-1 py-1 border border-gray-700 rounded-md items-center" > <input type="submit" hidden /> <input type="text" id="search-input" class="font-light w-full px-3 py-2 mx-1 resize-none outline-none interactive disabled:cursor-not-allowed" placeholder="Search query here..." /> <button class="bg-gray-700 hover:bg-gray-800 text-white font-normal py-2 w-16 rounded disabled:bg-gray-300 disabled:cursor-not-allowed interactive" > Search </button> </form> <div> <h3 class="font-medium">Input text:</h3> <div class="flex justify-between items-center"> <div class="rounded-md inline text-xs"> <span id="output-status" class="m-auto font-light invisible" >C</span > </div> </div> <div id="input-container" tabindex="0" class="min-h-[250px] bg-slate-100 text-gray-500 rounded-md p-4 flex flex-col gap-2 relative" > <textarea id="input-area" hidden value="" placeholder="Input text to perform semantic similarity search..." class="flex-1 resize-none outline-none left-0 right-0 top-0 bottom-0 m-4 absolute interactive disabled:invisible" ></textarea> <p id="output-area" class="grid-rows-2"> Input text to perform semantic similarity search... </p> </div> </div> </main> </body> </html>
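The ranking step above relies on the `cosineSimilarity` helper imported from `utils.js`; for reference, the same measure expressed on candle tensors looks roughly like the sketch below (written for this note, not part of the example).

// Hedged sketch: cosine similarity between two 1-D f32 embedding tensors,
// mirroring the cosineSimilarity helper used by the page above.
use candle::{Result, Tensor};

fn cosine_similarity(a: &Tensor, b: &Tensor) -> Result<f32> {
    let dot = (a * b)?.sum_all()?.to_scalar::<f32>()?;
    let norm_a = a.sqr()?.sum_all()?.to_scalar::<f32>()?.sqrt();
    let norm_b = b.sqr()?.sum_all()?.to_scalar::<f32>()?.sqrt();
    Ok(dot / (norm_a * norm_b))
}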
candle/candle-wasm-examples/bert/lib-example.html/0
{ "file_path": "candle/candle-wasm-examples/bert/lib-example.html", "repo_id": "candle", "token_count": 6066 }
<html> <head> <meta content="text/html;charset=utf-8" http-equiv="Content-Type" /> <title>Candle Llama.c Rust/WASM</title> </head> <body></body> </html> <!DOCTYPE html> <html> <head> <meta charset="UTF-8" /> <meta name="viewport" content="width=device-width, initial-scale=1.0" /> <style> @import url("https://fonts.googleapis.com/css2?family=Source+Code+Pro:wght@200;300;400&family=Source+Sans+3:wght@100;200;300;400;500;600;700;800;900&display=swap"); html, body { font-family: "Source Sans 3", sans-serif; } code, output, select, pre { font-family: "Source Code Pro", monospace; } </style> <script src="https://cdn.tailwindcss.com"></script> <script type="module"> // base url for audio examples const MODELS_BASE_URL = "https://huggingface.co/karpathy/tinyllamas/resolve/main"; // models base url const MODELS = { stories15M: { url: "stories15M.bin", seq_len: 256, }, stories42M: { url: "stories42M.bin", seq_len: 1024, }, stories110M: { url: "stories110M.bin", seq_len: 1024, }, }; const llamaWorker = new Worker("./llama2cWorker.js", { type: "module", }); async function generateSequence(controller) { const getValue = (id) => document.querySelector(`#${id}`).value; const modelID = getValue("model"); const model = MODELS[modelID]; const weightsURL = `${MODELS_BASE_URL}/${model.url}`; const prompt = getValue("prompt"); const temperature = getValue("temperature"); const topP = getValue("top-p"); const repeatPenalty = getValue("repeat_penalty"); const seed = getValue("seed"); const maxSeqLen = getValue("max-seq"); function updateStatus(data) { const outStatus = document.querySelector("#output-status"); const outGen = document.querySelector("#output-generation"); const outCounter = document.querySelector("#output-counter"); switch (data.status) { case "loading": outStatus.hidden = false; outStatus.textContent = data.message; outGen.hidden = true; outCounter.hidden = true; break; case "generating": const { message, prompt, sentence, tokensSec, totalTime } = data; outStatus.hidden = true; outCounter.hidden = false; outGen.hidden = false; outGen.innerHTML = `<span class="font-semibold">${prompt}</span>${sentence.replace( /\<s\>|\<\/s\>/g, "" )}`; outCounter.innerHTML = `${(totalTime / 1000).toFixed( 2 )}s (${tokensSec.toFixed(2)} tok/s)`; break; case "complete": outStatus.hidden = true; outGen.hidden = false; break; } } return new Promise((resolve, reject) => { llamaWorker.postMessage({ weightsURL, modelID, tokenizerURL: "tokenizer.json", prompt, temp: temperature, top_p: topP, repeatPenalty, seed: BigInt(seed), maxSeqLen, command: "start", }); const handleAbort = () => { llamaWorker.postMessage({ command: "abort" }); }; const handleMessage = (event) => { const { status, error, message, prompt, sentence } = event.data; if (status) updateStatus(event.data); if (error) { llamaWorker.removeEventListener("message", handleMessage); reject(new Error(error)); } if (status === "aborted") { llamaWorker.removeEventListener("message", handleMessage); resolve(event.data); } if (status === "complete") { llamaWorker.removeEventListener("message", handleMessage); resolve(event.data); } }; controller.signal.addEventListener("abort", handleAbort); llamaWorker.addEventListener("message", handleMessage); }); } const form = document.querySelector("#form"); const prompt = document.querySelector("#prompt"); const clearBtn = document.querySelector("#clear-btn"); const runBtn = document.querySelector("#run"); const modelSelect = document.querySelector("#model"); let runController = new AbortController(); let isRunning = false; 
    modelSelect.addEventListener("change", (e) => {
      const model = MODELS[e.target.value];
      document.querySelector("#max-seq").max = model.seq_len;
      document.querySelector("#max-seq").nextElementSibling.value = model.seq_len;
    });

    form.addEventListener("submit", async (e) => {
      e.preventDefault();
      if (isRunning) {
        stopRunning();
      } else {
        startRunning();
        await generateSequence(runController);
        stopRunning();
      }
    });

    function startRunning() {
      isRunning = true;
      runBtn.textContent = "Stop";
    }

    function stopRunning() {
      runController.abort();
      runController = new AbortController();
      runBtn.textContent = "Run";
      isRunning = false;
    }

    clearBtn.addEventListener("click", (e) => {
      e.preventDefault();
      prompt.value = "";
      clearBtn.classList.add("invisible");
      runBtn.disabled = true;
      stopRunning();
    });

    prompt.addEventListener("input", (e) => {
      runBtn.disabled = false;
      if (e.target.value.length > 0) {
        clearBtn.classList.remove("invisible");
      } else {
        clearBtn.classList.add("invisible");
      }
    });
  </script>
</head>
<body class="container max-w-4xl mx-auto p-4 text-gray-800">
  <main class="grid grid-cols-1 gap-8 relative">
    <span class="absolute text-5xl -ml-[1em]"> 🕯️ </span>
    <div>
      <h1 class="text-5xl font-bold">Candle Llama2.c</h1>
      <h2 class="text-2xl font-bold">Rust/WASM Demo</h2>
      <p class="max-w-lg">
        <a
          href="https://github.com/karpathy/llama2.c"
          target="_blank"
          class="underline hover:text-blue-500 hover:no-underline"
          >Llama2.c</a
        >
        is Andrej Karpathy's implementation of the Llama 2 LLM in C. This demo
        uses
        <a
          href="https://github.com/huggingface/candle/"
          target="_blank"
          class="underline hover:text-blue-500 hover:no-underline"
          >Candle
        </a>
        to run Llama2.c in the browser using Rust/WASM.
      </p>
    </div>
    <div>
      <label for="model" class="font-medium">Model Options: </label>
      <select id="model" class="border-2 border-gray-500 rounded-md font-light">
        <option value="stories15M" selected>stories 15M (60.8 MB)</option>
        <option value="stories42M">stories 42M (167 MB)</option>
        <option value="stories110M">stories 110M (438 MB)</option>
      </select>
    </div>
    <form
      id="form"
      class="flex text-normal px-1 py-1 border border-gray-700 rounded-md items-center">
      <input type="submit" hidden />
      <input
        type="text"
        id="prompt"
        class="font-light w-full px-3 py-2 mx-1 resize-none outline-none"
        placeholder="Add your prompt here..."
value="Once upon a time" /> <button id="clear-btn"> <svg fill="none" xmlns="http://www.w3.org/2000/svg" width="40" viewBox="0 0 70 40"> <path opacity=".5" d="M39 .2v40.2" stroke="#1F2937" /> <path d="M1.5 11.5 19 29.1m0-17.6L1.5 29.1" opacity=".5" stroke="#1F2937" stroke-width="2" /> </svg> </button> <button id="run" class="bg-gray-700 hover:bg-gray-800 text-white font-normal py-2 w-16 rounded disabled:bg-gray-300 disabled:cursor-not-allowed"> Run </button> </form> <details> <summary class="font-medium cursor-pointer">Advanced Options</summary> <div class="grid grid-cols-3 max-w-md items-center gap-3 py-3"> <label class="text-sm font-medium" for="max-seq" >Maximum length </label> <input type="range" id="max-seq" name="max-seq" min="1" max="256" step="1" value="200" oninput="this.nextElementSibling.value = Number(this.value)" /> <output class="text-xs w-[50px] text-center font-light px-1 py-1 border border-gray-700 rounded-md"> 200</output > <label class="text-sm font-medium" for="temperature" >Temperature</label > <input type="range" id="temperature" name="temperature" min="0" max="2" step="0.01" value="0.40" oninput="this.nextElementSibling.value = Number(this.value).toFixed(2)" /> <output class="text-xs w-[50px] text-center font-light px-1 py-1 border border-gray-700 rounded-md"> 0.40</output > <label class="text-sm font-medium" for="top-p">Top-p</label> <input type="range" id="top-p" name="top-p" min="0" max="1" step="0.01" value="1.00" oninput="this.nextElementSibling.value = Number(this.value).toFixed(2)" /> <output class="text-xs w-[50px] text-center font-light px-1 py-1 border border-gray-700 rounded-md"> 1.00</output > <label class="text-sm font-medium" for="repeat_penalty" >Repeat Penalty</label > <input type="range" id="repeat_penalty" name="repeat_penalty" min="1" max="2" step="0.01" value="1.10" oninput="this.nextElementSibling.value = Number(this.value).toFixed(2)" /> <output class="text-xs w-[50px] text-center font-light px-1 py-1 border border-gray-700 rounded-md" >1.10</output > <label class="text-sm font-medium" for="seed">Seed</label> <input type="number" id="seed" name="seed" value="299792458" class="font-light border border-gray-700 text-right rounded-md p-2" /> <button id="run" onclick="document.querySelector('#seed').value = BigInt(Math.floor(Math.random() * 2**64-1))" class="bg-gray-700 hover:bg-gray-800 text-white font-normal py-1 w-[50px] rounded disabled:bg-gray-300 disabled:cursor-not-allowed text-sm"> Rand </button> </div> </details> <div> <h3 class="font-medium">Generation:</h3> <div class="min-h-[250px] bg-slate-100 text-gray-500 p-4 rounded-md flex flex-col gap-2"> <div id="output-counter" hidden class="ml-auto font-semibold grid-rows-1 text-sm"></div> <p hidden id="output-generation" class="grid-rows-2"></p> <span id="output-status" class="m-auto font-light" >No output yet</span > </div> </div> </main> </body> </html>
candle/candle-wasm-examples/llama2-c/lib-example.html/0
{ "file_path": "candle/candle-wasm-examples/llama2-c/lib-example.html", "repo_id": "candle", "token_count": 6089 }
## Running T5 with Candle and WASM

Here, we provide an example of how to run T5 using a Candle-compiled WASM binary and runtime.

### Vanilla JS and WebWorkers

To build and test the UI made in Vanilla JS and WebWorkers, first we need to build the WASM library:

```bash
sh build-lib.sh
```

This will bundle the library under `./build` and we can import it inside our WebWorker like a normal JS module:

```js
import init, { ModelConditionalGeneration, ModelEncoder } from "./build/m.js";
```

For the quantized version, we need to import the quantized module:

```js
import init, { ModelConditionalGeneration, ModelEncoder } from "./build/m-quantized.js";
```

The full example can be found under `./index.html`. All needed assets are fetched from the web, so there is no need to download anything.
Finally, you can preview the example by running a local HTTP server. For example:

```bash
python -m http.server
```

Then open `http://localhost:8000/index.html` in your browser.
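For orientation, here is a minimal sketch of what the WebWorker side can look like. Only `init` and the two exported classes come from the import shown above; the message protocol and the way the model classes are constructed are assumptions to adapt, so check `./index.html` and the worker that ships with this example for the real wiring.

```js
// t5Worker.js: minimal sketch, not the exact worker used by ./index.html.
import init, { ModelConditionalGeneration, ModelEncoder } from "./build/m.js";

let wasmReady = false;

self.onmessage = async (event) => {
  if (!wasmReady) {
    await init(); // instantiate the wasm module once per worker
    wasmReady = true;
  }
  // Construct ModelConditionalGeneration / ModelEncoder here from the model
  // weights, tokenizer and config bytes you fetched, run the model on
  // event.data, then report back to the main thread:
  self.postMessage({ status: "complete" /*, output */ });
};
```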
candle/candle-wasm-examples/t5/README.md/0
{ "file_path": "candle/candle-wasm-examples/t5/README.md", "repo_id": "candle", "token_count": 282 }
// Audio processing code, adapted from whisper.cpp // https://github.com/ggerganov/whisper.cpp use super::worker; pub trait Float: num_traits::Float + num_traits::FloatConst + num_traits::NumAssign {} impl Float for f32 {} impl Float for f64 {} // https://github.com/ggerganov/whisper.cpp/blob/4774d2feb01a772a15de81ffc34b34a1f294f020/whisper.cpp#L2357 fn fft<T: Float>(inp: &[T]) -> Vec<T> { let n = inp.len(); let zero = T::zero(); if n == 1 { return vec![inp[0], zero]; } if n % 2 == 1 { return dft(inp); } let mut out = vec![zero; n * 2]; let mut even = Vec::with_capacity(n / 2); let mut odd = Vec::with_capacity(n / 2); for (i, &inp) in inp.iter().enumerate() { if i % 2 == 0 { even.push(inp) } else { odd.push(inp); } } let even_fft = fft(&even); let odd_fft = fft(&odd); let two_pi = T::PI() + T::PI(); let n_t = T::from(n).unwrap(); for k in 0..n / 2 { let k_t = T::from(k).unwrap(); let theta = two_pi * k_t / n_t; let re = theta.cos(); let im = -theta.sin(); let re_odd = odd_fft[2 * k]; let im_odd = odd_fft[2 * k + 1]; out[2 * k] = even_fft[2 * k] + re * re_odd - im * im_odd; out[2 * k + 1] = even_fft[2 * k + 1] + re * im_odd + im * re_odd; out[2 * (k + n / 2)] = even_fft[2 * k] - re * re_odd + im * im_odd; out[2 * (k + n / 2) + 1] = even_fft[2 * k + 1] - re * im_odd - im * re_odd; } out } // https://github.com/ggerganov/whisper.cpp/blob/4774d2feb01a772a15de81ffc34b34a1f294f020/whisper.cpp#L2337 fn dft<T: Float>(inp: &[T]) -> Vec<T> { let zero = T::zero(); let n = inp.len(); let two_pi = T::PI() + T::PI(); let mut out = Vec::with_capacity(2 * n); let n_t = T::from(n).unwrap(); for k in 0..n { let k_t = T::from(k).unwrap(); let mut re = zero; let mut im = zero; for (j, &inp) in inp.iter().enumerate() { let j_t = T::from(j).unwrap(); let angle = two_pi * k_t * j_t / n_t; re += inp * angle.cos(); im -= inp * angle.sin(); } out.push(re); out.push(im); } out } #[allow(clippy::too_many_arguments)] // https://github.com/ggerganov/whisper.cpp/blob/4774d2feb01a772a15de81ffc34b34a1f294f020/whisper.cpp#L2414 fn log_mel_spectrogram_w<T: Float>( ith: usize, hann: &[T], samples: &[T], filters: &[T], fft_size: usize, fft_step: usize, speed_up: bool, n_len: usize, n_mel: usize, n_threads: usize, ) -> Vec<T> { let n_fft = if speed_up { 1 + fft_size / 4 } else { 1 + fft_size / 2 }; let zero = T::zero(); let half = T::from(0.5).unwrap(); let mut fft_in = vec![zero; fft_size]; let mut mel = vec![zero; n_len * n_mel]; for i in (ith..n_len).step_by(n_threads) { let offset = i * fft_step; // apply Hanning window for j in 0..fft_size { fft_in[j] = if offset + j < samples.len() { hann[j] * samples[offset + j] } else { zero } } // FFT -> mag^2 let mut fft_out: Vec<T> = fft(&fft_in); for j in 0..fft_size { fft_out[j] = fft_out[2 * j] * fft_out[2 * j] + fft_out[2 * j + 1] * fft_out[2 * j + 1]; } for j in 1..fft_size / 2 { let v = fft_out[fft_size - j]; fft_out[j] += v; } if speed_up { // scale down in the frequency domain results in a speed up in the time domain for j in 0..n_fft { fft_out[j] = half * (fft_out[2 * j] + fft_out[2 * j + 1]); } } // mel spectrogram for j in 0..n_mel { let mut sum = zero; for k in 0..n_fft { sum += fft_out[k] * filters[j * n_fft + k]; } mel[j * n_len + i] = T::max(sum, T::from(1e-10).unwrap()).log10(); } } mel } fn log_mel_spectrogram_<T: Float + std::fmt::Display>( samples: &[T], filters: &[T], fft_size: usize, fft_step: usize, n_mel: usize, speed_up: bool, ) -> Vec<T> { let zero = T::zero(); let two_pi = T::PI() + T::PI(); let half = T::from(0.5).unwrap(); let one = T::from(1.0).unwrap(); 
let four = T::from(4.0).unwrap(); let fft_size_t = T::from(fft_size).unwrap(); let hann: Vec<T> = (0..fft_size) .map(|i| half * (one - ((two_pi * T::from(i).unwrap()) / fft_size_t).cos())) .collect(); let n_len = samples.len() / fft_step; // pad audio with at least one extra chunk of zeros let pad = 100 * worker::m::CHUNK_LENGTH / 2; let n_len = if n_len % pad != 0 { (n_len / pad + 1) * pad } else { n_len }; let n_len = n_len + pad; let samples = { let mut samples_padded = samples.to_vec(); let to_add = n_len * fft_step - samples.len(); samples_padded.extend(std::iter::repeat(zero).take(to_add)); samples_padded }; // Use a single thread for now. let mut mel = log_mel_spectrogram_w( 0, &hann, &samples, filters, fft_size, fft_step, speed_up, n_len, n_mel, 1, ); let mmax = mel .iter() .max_by(|&u, &v| u.partial_cmp(v).unwrap_or(std::cmp::Ordering::Greater)) .copied() .unwrap_or(zero) - T::from(8).unwrap(); for m in mel.iter_mut() { let v = T::max(*m, mmax); *m = v / four + one } mel } pub fn pcm_to_mel<T: Float + std::fmt::Display>( cfg: &worker::m::Config, samples: &[T], filters: &[T], ) -> anyhow::Result<Vec<T>> { let mel = log_mel_spectrogram_( samples, filters, worker::m::N_FFT, worker::m::HOP_LENGTH, cfg.num_mel_bins, false, ); Ok(mel) }
candle/candle-wasm-examples/whisper/src/audio.rs/0
{ "file_path": "candle/candle-wasm-examples/whisper/src/audio.rs", "repo_id": "candle", "token_count": 3162 }
{ "version": "0.2.0", "configurations": [ { "command": "npm run dev", "name": "Run development server", "request": "launch", "type": "node-terminal" } ] }
chat-ui/.vscode/launch.json/0
{ "file_path": "chat-ui/.vscode/launch.json", "repo_id": "chat-ui", "token_count": 82 }
{{- if and .Values.serviceAccount.enabled .Values.serviceAccount.create }} apiVersion: v1 kind: ServiceAccount automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} metadata: name: "{{ .Values.serviceAccount.name | default (include "name" .) }}" namespace: {{ .Release.Namespace }} labels: {{ include "labels.standard" . | nindent 4 }} {{- with .Values.serviceAccount.annotations }} annotations: {{- toYaml . | nindent 4 }} {{- end }} {{- end }}
chat-ui/chart/templates/service-account.yaml/0
{ "file_path": "chat-ui/chart/templates/service-account.yaml", "repo_id": "chat-ui", "token_count": 154 }
# Llama.cpp | Feature | Available | | --------------------------- | --------- | | [Tools](../tools) | No | | [Multimodal](../multimodal) | No | Chat UI supports the llama.cpp API server directly without the need for an adapter. You can do this using the `llamacpp` endpoint type. If you want to run Chat UI with llama.cpp, you can do the following, using [microsoft/Phi-3-mini-4k-instruct-gguf](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct-gguf) as an example model: ```bash # install llama.cpp brew install llama.cpp # start llama.cpp server llama-server --hf-repo microsoft/Phi-3-mini-4k-instruct-gguf --hf-file Phi-3-mini-4k-instruct-q4.gguf -c 4096 ``` _note: you can swap the `hf-repo` and `hf-file` with your fav GGUF on the [Hub](https://huggingface.co/models?library=gguf). For example: `--hf-repo TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF` for [this repo](https://huggingface.co/TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF) & `--hf-file tinyllama-1.1b-chat-v1.0.Q4_0.gguf` for [this file](https://huggingface.co/TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF/blob/main/tinyllama-1.1b-chat-v1.0.Q4_0.gguf)._ A local LLaMA.cpp HTTP Server will start on `http://localhost:8080` (to change the port or any other default options, please find [LLaMA.cpp HTTP Server readme](https://github.com/ggerganov/llama.cpp/tree/master/examples/server)). Add the following to your `.env.local`: ```ini MODELS=`[ { "name": "Local microsoft/Phi-3-mini-4k-instruct-gguf", "tokenizer": "microsoft/Phi-3-mini-4k-instruct-gguf", "preprompt": "", "chatPromptTemplate": "<s>{{preprompt}}{{#each messages}}{{#ifUser}}<|user|>\n{{content}}<|end|>\n<|assistant|>\n{{/ifUser}}{{#ifAssistant}}{{content}}<|end|>\n{{/ifAssistant}}{{/each}}", "parameters": { "stop": ["<|end|>", "<|endoftext|>", "<|assistant|>"], "temperature": 0.7, "max_new_tokens": 1024, "truncate": 3071 }, "endpoints": [{ "type" : "llamacpp", "baseURL": "http://localhost:8080" }], }, ]` ``` <div class="flex justify-center"> <img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/chat-ui/llamacpp-light.png" height="auto"/> <img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/chat-ui/llamacpp-dark.png" height="auto"/> </div>
chat-ui/docs/source/configuration/models/providers/llamacpp.md/0
{ "file_path": "chat-ui/docs/source/configuration/models/providers/llamacpp.md", "repo_id": "chat-ui", "token_count": 1023 }
ENV_LOCAL_PATH=/app/.env.local if test -z "${DOTENV_LOCAL}" ; then if ! test -f "${ENV_LOCAL_PATH}" ; then echo "DOTENV_LOCAL was not found in the ENV variables and .env.local is not set using a bind volume. Make sure to set environment variables properly. " fi; else echo "DOTENV_LOCAL was found in the ENV variables. Creating .env.local file." cat <<< "$DOTENV_LOCAL" > ${ENV_LOCAL_PATH} fi; if [ "$INCLUDE_DB" = "true" ] ; then echo "Starting local MongoDB instance" nohup mongod & fi; export PUBLIC_VERSION=$(node -p "require('./package.json').version") dotenv -e /app/.env -c -- node /app/build/index.js -- --host 0.0.0.0 --port 3000
chat-ui/entrypoint.sh/0
{ "file_path": "chat-ui/entrypoint.sh", "repo_id": "chat-ui", "token_count": 266 }
<script lang="ts"> import { base } from "$app/paths"; import type { ToolLogoColor, ToolLogoIcon } from "$lib/types/Tool"; import { debounce } from "$lib/utils/debounce"; import { onMount } from "svelte"; import ToolLogo from "./ToolLogo.svelte"; import CarbonClose from "~icons/carbon/close"; interface ToolSuggestion { _id: string; displayName: string; createdByName: string; color: ToolLogoColor; icon: ToolLogoIcon; } interface Props { toolIds?: string[]; } let { toolIds = $bindable([]) }: Props = $props(); let selectedValues: ToolSuggestion[] = $state([]); onMount(async () => { selectedValues = await Promise.all( toolIds.map(async (id) => await fetch(`${base}/api/tools/${id}`).then((res) => res.json())) ); await fetchSuggestions(""); }); let inputValue = $state(""); let maxValues = 3; let suggestions: ToolSuggestion[] = $state([]); async function fetchSuggestions(query: string) { suggestions = (await fetch(`${base}/api/tools/search?q=${query}`).then((res) => res.json() )) satisfies ToolSuggestion[]; } const debouncedFetch = debounce((query: string) => fetchSuggestions(query), 300); function addValue(value: ToolSuggestion) { if (selectedValues.length < maxValues && !selectedValues.includes(value)) { selectedValues = [...selectedValues, value]; toolIds = [...toolIds, value._id]; inputValue = ""; suggestions = []; } } function removeValue(id: ToolSuggestion["_id"]) { selectedValues = selectedValues.filter((v) => v._id !== id); toolIds = selectedValues.map((value) => value._id); } </script> {#if selectedValues.length > 0} <div class="flex flex-wrap items-center justify-center gap-2"> {#each selectedValues as value} <div class="flex items-center justify-center space-x-2 rounded border border-gray-300 bg-gray-200 px-2 py-1" > {#key value.color + value.icon} <ToolLogo color={value.color} icon={value.icon} size="sm" /> {/key} <div class="flex flex-col items-center justify-center py-1"> <a href={`${base}/tools/${value._id}`} target="_blank" class="line-clamp-1 truncate font-semibold text-blue-600 hover:underline" >{value.displayName}</a > {#if value.createdByName} <p class="text-center text-xs text-gray-500"> Created by <a class="underline" href="{base}/tools?user={value.createdByName}" target="_blank" >{value.createdByName}</a > </p> {:else} <p class="text-center text-xs text-gray-500">Official HuggingChat tool</p> {/if} </div> <button onclick={(e) => { e.preventDefault(); e.stopPropagation(); removeValue(value._id); }} class="text-lg text-gray-600" > <CarbonClose /> </button> </div> {/each} </div> {/if} {#if selectedValues.length < maxValues} <div class="group relative block"> <input type="text" bind:value={inputValue} oninput={(ev) => { inputValue = ev.currentTarget.value; debouncedFetch(inputValue); }} disabled={selectedValues.length >= maxValues} class="w-full rounded border border-gray-200 bg-gray-100 px-3 py-2" class:opacity-50={selectedValues.length >= maxValues} class:bg-gray-100={selectedValues.length >= maxValues} placeholder="Type to search tools..." /> {#if suggestions.length > 0} <div class="invisible absolute z-10 mt-1 w-full rounded border border-gray-300 bg-white shadow-lg group-focus-within:visible" > {#if inputValue === ""} <p class="px-3 py-2 text-left text-xs text-gray-500"> Start typing to search for tools... 
</p> {:else} {#each suggestions as suggestion} <button onclick={(e) => { e.preventDefault(); e.stopPropagation(); addValue(suggestion); }} class="w-full cursor-pointer px-3 py-2 text-left hover:bg-blue-500 hover:text-white" > {suggestion.displayName} {#if suggestion.createdByName} <span class="text-xs text-gray-500"> by {suggestion.createdByName}</span> {/if} </button> {/each} {/if} </div> {/if} </div> {/if}
chat-ui/src/lib/components/AssistantToolPicker.svelte/0
{ "file_path": "chat-ui/src/lib/components/AssistantToolPicker.svelte", "repo_id": "chat-ui", "token_count": 1761 }
<script lang="ts"> import CarbonCaretLeft from "~icons/carbon/caret-left"; import CarbonCaretRight from "~icons/carbon/caret-right"; interface Props { href: string; direction: "next" | "previous"; isDisabled?: boolean; } let { href, direction, isDisabled = false }: Props = $props(); </script> <a class="flex items-center rounded-lg px-2.5 py-1 hover:bg-gray-50 dark:hover:bg-gray-800 {isDisabled ? 'pointer-events-none opacity-50' : ''}" {href} > {#if direction === "previous"} <CarbonCaretLeft classNames="mr-1.5" /> Previous {:else} Next <CarbonCaretRight classNames="ml-1.5" /> {/if} </a>
chat-ui/src/lib/components/PaginationArrow.svelte/0
{ "file_path": "chat-ui/src/lib/components/PaginationArrow.svelte", "repo_id": "chat-ui", "token_count": 254 }
<script lang="ts"> import type { Message } from "$lib/types/Message"; import CarbonTrashCan from "~icons/carbon/trash-can"; import CarbonChevronLeft from "~icons/carbon/chevron-left"; import CarbonChevronRight from "~icons/carbon/chevron-right"; import { enhance } from "$app/forms"; import { createEventDispatcher } from "svelte"; interface Props { message: Message; alternatives?: Message["id"][]; loading?: boolean; } let { message, alternatives = [], loading = false }: Props = $props(); let currentIdx = $derived(alternatives.findIndex((id) => id === message.id)); const dispatch = createEventDispatcher<{ showAlternateMsg: { id: Message["id"] }; }>(); </script> <div class="font-white group/navbranch z-0 -mt-1 ml-3.5 mr-auto flex h-6 w-fit select-none flex-row items-center justify-center gap-1 text-sm" > <button class="inline text-lg font-thin text-gray-400 hover:text-gray-800 disabled:pointer-events-none disabled:opacity-25 dark:text-gray-500 dark:hover:text-gray-200" onclick={() => dispatch("showAlternateMsg", { id: alternatives[Math.max(0, currentIdx - 1)] })} disabled={currentIdx === 0 || loading} > <CarbonChevronLeft class="text-sm" /> </button> <span class=" text-gray-400 dark:text-gray-500"> {currentIdx + 1} / {alternatives.length} </span> <button class="inline text-lg font-thin text-gray-400 hover:text-gray-800 disabled:pointer-events-none disabled:opacity-25 dark:text-gray-500 dark:hover:text-gray-200" onclick={() => dispatch("showAlternateMsg", { id: alternatives[Math.min(alternatives.length - 1, currentIdx + 1)], })} disabled={currentIdx === alternatives.length - 1 || loading} > <CarbonChevronRight class="text-sm" /> </button> {#if !loading && message.children}<form method="POST" action="?/deleteBranch" class="hidden group-hover/navbranch:block" use:enhance={({ cancel }) => { if (!confirm("Are you sure you want to delete this branch?")) { cancel(); } }} > <input name="messageId" value={message.id} type="hidden" /> <button class="flex items-center justify-center text-xs text-gray-400 hover:text-gray-800 dark:text-gray-500 dark:hover:text-gray-200" type="submit" ><CarbonTrashCan /> </button> </form> {/if} </div>
chat-ui/src/lib/components/chat/Alternatives.svelte/0
{ "file_path": "chat-ui/src/lib/components/chat/Alternatives.svelte", "repo_id": "chat-ui", "token_count": 844 }
import { Database } from "$lib/server/database"; import { migrations } from "./routines"; import { acquireLock, releaseLock, isDBLocked, refreshLock } from "./lock"; import { isHuggingChat } from "$lib/utils/isHuggingChat"; import { logger } from "$lib/server/logger"; const LOCK_KEY = "migrations"; export async function checkAndRunMigrations() { // make sure all GUIDs are unique if (new Set(migrations.map((m) => m._id.toString())).size !== migrations.length) { throw new Error("Duplicate migration GUIDs found."); } // check if all migrations have already been run const migrationResults = await Database.getInstance() .getCollections() .migrationResults.find() .toArray(); logger.info("[MIGRATIONS] Begin check..."); // connect to the database const connectedClient = await Database.getInstance().getClient().connect(); const lockId = await acquireLock(LOCK_KEY); if (!lockId) { // another instance already has the lock, so we exit early logger.info( "[MIGRATIONS] Another instance already has the lock. Waiting for DB to be unlocked." ); // Todo: is this necessary? Can we just return? // block until the lock is released while (await isDBLocked(LOCK_KEY)) { await new Promise((resolve) => setTimeout(resolve, 1000)); } return; } // once here, we have the lock // make sure to refresh it regularly while it's running const refreshInterval = setInterval(async () => { await refreshLock(LOCK_KEY, lockId); }, 1000 * 10); // iterate over all migrations for (const migration of migrations) { // check if the migration has already been applied const shouldRun = migration.runEveryTime || !migrationResults.find((m) => m._id.toString() === migration._id.toString()); // check if the migration has already been applied if (!shouldRun) { logger.info(`[MIGRATIONS] "${migration.name}" already applied. Skipping...`); } else { // check the modifiers to see if some cases match if ( (migration.runForHuggingChat === "only" && !isHuggingChat) || (migration.runForHuggingChat === "never" && isHuggingChat) ) { logger.info( `[MIGRATIONS] "${migration.name}" should not be applied for this run. Skipping...` ); continue; } // otherwise all is good and we can run the migration logger.info( `[MIGRATIONS] "${migration.name}" ${ migration.runEveryTime ? "should run every time" : "not applied yet" }. Applying...` ); await Database.getInstance() .getCollections() .migrationResults.updateOne( { _id: migration._id }, { $set: { name: migration.name, status: "ongoing", }, }, { upsert: true } ); const session = connectedClient.startSession(); let result = false; try { await session.withTransaction(async () => { result = await migration.up(Database.getInstance()); }); } catch (e) { logger.info(`[MIGRATIONS] "${migration.name}" failed!`); logger.error(e); } finally { await session.endSession(); } await Database.getInstance() .getCollections() .migrationResults.updateOne( { _id: migration._id }, { $set: { name: migration.name, status: result ? "success" : "failure", }, }, { upsert: true } ); } } logger.info("[MIGRATIONS] All migrations applied. Releasing lock"); clearInterval(refreshInterval); await releaseLock(LOCK_KEY, lockId); }
chat-ui/src/lib/migrations/migrations.ts/0
{ "file_path": "chat-ui/src/lib/migrations/migrations.ts", "repo_id": "chat-ui", "token_count": 1286 }
import { z } from "zod"; import type { EmbeddingEndpoint, Embedding } from "../embeddingEndpoints"; import { chunk } from "$lib/utils/chunk"; import { env } from "$env/dynamic/private"; import { logger } from "$lib/server/logger"; export const embeddingEndpointHfApiSchema = z.object({ weight: z.number().int().positive().default(1), model: z.any(), type: z.literal("hfapi"), authorization: z .string() .optional() .transform((v) => (!v && env.HF_TOKEN ? "Bearer " + env.HF_TOKEN : v)), // if the header is not set but HF_TOKEN is, use it as the authorization header }); export async function embeddingEndpointHfApi( input: z.input<typeof embeddingEndpointHfApiSchema> ): Promise<EmbeddingEndpoint> { const { model, authorization } = embeddingEndpointHfApiSchema.parse(input); const url = `${env.HF_API_ROOT}/${model.id}`; return async ({ inputs }) => { const batchesInputs = chunk(inputs, 128); const batchesResults = await Promise.all( batchesInputs.map(async (batchInputs) => { const response = await fetch(`${url}`, { method: "POST", headers: { Accept: "application/json", "Content-Type": "application/json", ...(authorization ? { Authorization: authorization } : {}), }, body: JSON.stringify({ inputs: { source_sentence: batchInputs[0], sentences: batchInputs.slice(1), }, }), }); if (!response.ok) { logger.error(await response.text()); logger.error(response, "Failed to get embeddings from Hugging Face API"); return []; } const embeddings: Embedding[] = await response.json(); return embeddings; }) ); const flatAllEmbeddings = batchesResults.flat(); return flatAllEmbeddings; }; }
chat-ui/src/lib/server/embeddingEndpoints/hfApi/embeddingHfApi.ts/0
{ "file_path": "chat-ui/src/lib/server/embeddingEndpoints/hfApi/embeddingHfApi.ts", "repo_id": "chat-ui", "token_count": 671 }
import type { Sharp } from "sharp"; import sharp from "sharp"; import type { MessageFile } from "$lib/types/Message"; import { z, type util } from "zod"; export interface ImageProcessorOptions<TMimeType extends string = string> { supportedMimeTypes: TMimeType[]; preferredMimeType: TMimeType; maxSizeInMB: number; maxWidth: number; maxHeight: number; } export type ImageProcessor<TMimeType extends string = string> = (file: MessageFile) => Promise<{ image: Buffer; mime: TMimeType; }>; export function createImageProcessorOptionsValidator<TMimeType extends string = string>( defaults: ImageProcessorOptions<TMimeType> ) { return z .object({ supportedMimeTypes: z .array( z.enum<string, [TMimeType, ...TMimeType[]]>([ defaults.supportedMimeTypes[0], ...defaults.supportedMimeTypes.slice(1), ]) ) .default(defaults.supportedMimeTypes), preferredMimeType: z .enum([defaults.supportedMimeTypes[0], ...defaults.supportedMimeTypes.slice(1)]) .default(defaults.preferredMimeType as util.noUndefined<TMimeType>), maxSizeInMB: z.number().positive().default(defaults.maxSizeInMB), maxWidth: z.number().int().positive().default(defaults.maxWidth), maxHeight: z.number().int().positive().default(defaults.maxHeight), }) .default(defaults); } export function makeImageProcessor<TMimeType extends string = string>( options: ImageProcessorOptions<TMimeType> ): ImageProcessor<TMimeType> { return async (file) => { const { supportedMimeTypes, preferredMimeType, maxSizeInMB, maxWidth, maxHeight } = options; const { mime, value } = file; const buffer = Buffer.from(value, "base64"); let sharpInst = sharp(buffer); const metadata = await sharpInst.metadata(); if (!metadata) throw Error("Failed to read image metadata"); const { width, height } = metadata; if (width === undefined || height === undefined) throw Error("Failed to read image size"); const tooLargeInSize = width > maxWidth || height > maxHeight; const tooLargeInBytes = buffer.byteLength > maxSizeInMB * 1000 * 1000; const outputMime = chooseMimeType(supportedMimeTypes, preferredMimeType, mime, { preferSizeReduction: tooLargeInBytes, }); // Resize if necessary if (tooLargeInSize || tooLargeInBytes) { const size = chooseImageSize({ mime: outputMime, width, height, maxWidth, maxHeight, maxSizeInMB, }); if (size.width !== width || size.height !== height) { sharpInst = resizeImage(sharpInst, size.width, size.height); } } // Convert format if necessary // We always want to convert the image when the file was too large in bytes // so we can guarantee that ideal options are used, which are expected when // choosing the image size if (outputMime !== mime || tooLargeInBytes) { sharpInst = convertImage(sharpInst, outputMime); } const processedImage = await sharpInst.toBuffer(); return { image: processedImage, mime: outputMime }; }; } const outputFormats = ["png", "jpeg", "webp", "avif", "tiff", "gif"] as const; type OutputImgFormat = (typeof outputFormats)[number]; const isOutputFormat = (format: string): format is (typeof outputFormats)[number] => outputFormats.includes(format as OutputImgFormat); export function convertImage(sharpInst: Sharp, outputMime: string): Sharp { const [type, format] = outputMime.split("/"); if (type !== "image") throw Error(`Requested non-image mime type: ${outputMime}`); if (!isOutputFormat(format)) { throw Error(`Requested to convert to an unsupported format: ${format}`); } return sharpInst[format](); } // heic/heif requires proprietary license // TODO: blocking heif may be incorrect considering it also supports av1, so we should instead // detect the 
compression method used via sharp().metadata().compression // TODO: consider what to do about animated formats: apng, gif, animated webp, ... const blocklistedMimes = ["image/heic", "image/heif"]; /** Sorted from largest to smallest */ const mimesBySizeDesc = [ "image/png", "image/tiff", "image/gif", "image/jpeg", "image/webp", "image/avif", ]; /** * Defaults to preferred format or uses existing mime if supported * When preferSizeReduction is true, it will choose the smallest format that is supported **/ function chooseMimeType<T extends readonly string[]>( supportedMimes: T, preferredMime: string, mime: string, { preferSizeReduction }: { preferSizeReduction: boolean } ): T[number] { if (!supportedMimes.includes(preferredMime)) { const supportedMimesStr = supportedMimes.join(", "); throw Error( `Preferred format "${preferredMime}" not found in supported mimes: ${supportedMimesStr}` ); } const [type] = mime.split("/"); if (type !== "image") throw Error(`Received non-image mime type: ${mime}`); if (supportedMimes.includes(mime) && !preferSizeReduction) return mime; if (blocklistedMimes.includes(mime)) throw Error(`Received blocklisted mime type: ${mime}`); const smallestMime = mimesBySizeDesc.findLast((m) => supportedMimes.includes(m)); return smallestMime ?? preferredMime; } interface ImageSizeOptions { mime: string; width: number; height: number; maxWidth: number; maxHeight: number; maxSizeInMB: number; } /** Resizes the image to fit within the specified size in MB by guessing the output size */ export function chooseImageSize({ mime, width, height, maxWidth, maxHeight, maxSizeInMB, }: ImageSizeOptions): { width: number; height: number } { const biggestDiscrepency = Math.max(1, width / maxWidth, height / maxHeight); let selectedWidth = Math.ceil(width / biggestDiscrepency); let selectedHeight = Math.ceil(height / biggestDiscrepency); do { const estimatedSize = estimateImageSizeInBytes(mime, selectedWidth, selectedHeight); if (estimatedSize < maxSizeInMB * 1024 * 1024) { return { width: selectedWidth, height: selectedHeight }; } selectedWidth = Math.floor(selectedWidth / 1.1); selectedHeight = Math.floor(selectedHeight / 1.1); } while (selectedWidth > 1 && selectedHeight > 1); throw Error(`Failed to resize image to fit within ${maxSizeInMB}MB`); } const mimeToCompressionRatio: Record<string, number> = { "image/png": 1 / 2, "image/jpeg": 1 / 10, "image/webp": 1 / 4, "image/avif": 1 / 5, "image/tiff": 1, "image/gif": 1 / 5, }; /** * Guesses the side of an image in MB based on its format and dimensions * Should guess the worst case **/ function estimateImageSizeInBytes(mime: string, width: number, height: number): number { const compressionRatio = mimeToCompressionRatio[mime]; if (!compressionRatio) throw Error(`Unsupported image format: ${mime}`); const bitsPerPixel = 32; // Assuming 32-bit color depth for 8-bit R G B A const bytesPerPixel = bitsPerPixel / 8; const uncompressedSize = width * height * bytesPerPixel; return uncompressedSize * compressionRatio; } export function resizeImage(sharpInst: Sharp, maxWidth: number, maxHeight: number): Sharp { return sharpInst.resize({ width: maxWidth, height: maxHeight, fit: "inside" }); }
chat-ui/src/lib/server/endpoints/images.ts/0
{ "file_path": "chat-ui/src/lib/server/endpoints/images.ts", "repo_id": "chat-ui", "token_count": 2311 }
import { generateFromDefaultEndpoint } from "../generateFromDefaultEndpoint"; import { getReturnFromGenerator } from "$lib/utils/getReturnFromGenerator"; import { logger } from "../logger"; export async function generateSummaryOfReasoning(buffer: string): Promise<string> { // debug 5s delay await new Promise((resolve) => setTimeout(resolve, 3000)); const summary = await getReturnFromGenerator( generateFromDefaultEndpoint({ messages: [ { from: "user", content: buffer.slice(-200), }, ], preprompt: `You are tasked with summarizing the latest reasoning steps. Never describe results of the reasoning, only the process. Remain vague in your summary. The text might be incomplete, try your best to summarize it in one very short sentence, starting with a gerund and ending with three points. Example: "Thinking about life...", "Summarizing the results...", "Processing the input..."`, generateSettings: { max_new_tokens: 50, }, }) ) .then((summary) => { const parts = summary.split("..."); return parts[0] + "..."; }) .catch((e) => { logger.error(e); return "Reasoning..."; }); return summary; }
chat-ui/src/lib/server/textGeneration/reasoning.ts/0
{ "file_path": "chat-ui/src/lib/server/textGeneration/reasoning.ts", "repo_id": "chat-ui", "token_count": 403 }
import type { SerializedHTMLElement } from "../scrape/types"; import { htmlElementToMarkdownElements, mergeAdjacentElements } from "./fromHtml"; import type { HeaderElement, MarkdownElement } from "./types"; import { MarkdownElementType } from "./types"; import { chunkElements } from "./utils/chunk"; /** * Converts HTML elements to Markdown elements and creates a tree based on header tags * For example: h1 [h2 [p p blockquote] h2 [h3 [...] ] ] **/ export function htmlToMarkdownTree( title: string, htmlElements: SerializedHTMLElement[], maxCharsPerElem: number ): HeaderElement { let parent: HeaderElement = { type: MarkdownElementType.Header, level: 1, parent: null, content: title, children: [], }; const markdownElements = chunkElements( mergeAdjacentElements( htmlElements.flatMap((elem) => htmlElementToMarkdownElements(parent, elem)) ), maxCharsPerElem ); for (const elem of markdownElements) { if (elem.type !== MarkdownElementType.Header) { elem.parent = parent; parent.children.push(elem); continue; } // add 1 to current level to offset for the title being level 1 elem.level += 1; // Pop up header levels until reaching the same level as the current header // or until we reach the root inner: while (parent !== null && parent.parent !== null) { if (parent.level < elem.level) break inner; parent = parent.parent; } parent.children.push(elem); parent = elem; } // Pop up to the root while (parent.parent !== null) { parent = parent.parent; } return parent; } export function removeParents<T extends MarkdownElement>(elem: T): T { if ("children" in elem) { return { ...elem, parent: null, children: elem.children.map((child) => removeParents(child)) }; } return { ...elem, parent: null }; }
chat-ui/src/lib/server/websearch/markdown/tree.ts/0
{ "file_path": "chat-ui/src/lib/server/websearch/markdown/tree.ts", "repo_id": "chat-ui", "token_count": 613 }
import { env } from "$env/dynamic/private"; import type { WebSearchSource } from "$lib/types/WebSearch"; export default async function search(query: string): Promise<WebSearchSource[]> { const params = { q: query, hl: "en", gl: "us", }; const response = await fetch("https://google.serper.dev/search", { method: "POST", body: JSON.stringify(params), headers: { "x-api-key": env.SERPER_API_KEY, "Content-type": "application/json", }, }); /* eslint-disable @typescript-eslint/no-explicit-any */ const data = (await response.json()) as Record<string, any>; if (!response.ok) { throw new Error( data["message"] ?? `Serper API returned error code ${response.status} - ${response.statusText}` ); } return data["organic"] ?? []; }
chat-ui/src/lib/server/websearch/search/endpoints/serper.ts/0
{ "file_path": "chat-ui/src/lib/server/websearch/search/endpoints/serper.ts", "repo_id": "chat-ui", "token_count": 282 }
import type { ObjectId } from "mongodb"; import type { User } from "./User"; import type { Timestamps } from "./Timestamps"; import type { ReviewStatus } from "./Review"; export interface Assistant extends Timestamps { _id: ObjectId; createdById: User["_id"] | string; // user id or session createdByName?: User["username"]; avatar?: string; name: string; description?: string; modelId: string; exampleInputs: string[]; preprompt: string; userCount?: number; review: ReviewStatus; rag?: { allowAllDomains: boolean; allowedDomains: string[]; allowedLinks: string[]; }; generateSettings?: { temperature?: number; top_p?: number; repetition_penalty?: number; top_k?: number; }; dynamicPrompt?: boolean; searchTokens: string[]; last24HoursCount: number; tools?: string[]; } // eslint-disable-next-line no-shadow export enum SortKey { POPULAR = "popular", TRENDING = "trending", }
chat-ui/src/lib/types/Assistant.ts/0
{ "file_path": "chat-ui/src/lib/types/Assistant.ts", "repo_id": "chat-ui", "token_count": 318 }
import type { Message } from "./Message"; import type { Tool, ToolResult } from "./Tool"; export type ChatTemplateInput = { messages: Pick<Message, "from" | "content" | "files">[]; preprompt?: string; tools?: Tool[]; toolResults?: ToolResult[]; continueMessage?: boolean; };
chat-ui/src/lib/types/Template.ts/0
{ "file_path": "chat-ui/src/lib/types/Template.ts", "repo_id": "chat-ui", "token_count": 89 }
<script lang="ts"> import { page } from "$app/state"; </script> <div class="flex items-center justify-center bg-gradient-to-t from-gray-200 text-gray-800 dark:from-gray-700 dark:text-gray-300" > <div class="align-center -mt-24 flex flex-col justify-center rounded-xl border bg-white px-8 pb-2 pt-4 text-center dark:border-gray-700 dark:bg-gray-800" > <h1 class="mb-2 text-5xl font-semibold">{page.status}</h1> <div class="-mx-8 my-2 h-px bg-gray-200 dark:bg-gray-700"></div> <h2 class="max-w-sm text-lg">{page.error?.message}</h2> {#if page.error?.errorId} <div class="-mx-8 my-2 h-px bg-gray-200 dark:bg-gray-700"></div> <pre class="max-w-sm whitespace-pre-wrap text-left font-mono text-xs">{page.error .errorId}</pre> {/if} </div> </div>
chat-ui/src/routes/+error.svelte/0
{ "file_path": "chat-ui/src/routes/+error.svelte", "repo_id": "chat-ui", "token_count": 342 }