# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch TrOCR model.""" import unittest from transformers import TrOCRConfig from transformers.testing_utils import is_torch_available, require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM @require_torch class TrOCRStandaloneDecoderModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, d_model=16, decoder_seq_length=7, is_training=True, is_decoder=True, use_attention_mask=True, use_cache=False, use_labels=True, decoder_start_token_id=2, decoder_ffn_dim=32, decoder_layers=2, decoder_attention_heads=4, max_position_embeddings=30, pad_token_id=0, bos_token_id=1, eos_token_id=2, scope=None, ): self.parent = parent self.batch_size = batch_size self.decoder_seq_length = decoder_seq_length # For common tests self.seq_length = self.decoder_seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_labels = use_labels self.vocab_size = vocab_size self.d_model = d_model self.hidden_size = d_model self.num_hidden_layers = decoder_layers self.decoder_layers = decoder_layers self.decoder_ffn_dim = decoder_ffn_dim self.decoder_attention_heads = decoder_attention_heads self.num_attention_heads = decoder_attention_heads self.eos_token_id = eos_token_id self.bos_token_id = bos_token_id self.pad_token_id = pad_token_id self.decoder_start_token_id = decoder_start_token_id self.use_cache = use_cache self.max_position_embeddings = max_position_embeddings self.scope = None self.decoder_key_length = decoder_seq_length self.base_model_out_len = 2 self.decoder_attention_idx = 1 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2) lm_labels = None if self.use_labels: lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) config = TrOCRConfig( vocab_size=self.vocab_size, d_model=self.d_model, decoder_layers=self.decoder_layers, decoder_ffn_dim=self.decoder_ffn_dim, decoder_attention_heads=self.decoder_attention_heads, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, use_cache=self.use_cache, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, max_position_embeddings=self.max_position_embeddings, ) return (config, input_ids, attention_mask, lm_labels) def create_and_check_decoder_model_past( self, config, input_ids, attention_mask, lm_labels, ): config.use_cache = True model = TrOCRDecoder(config=config).to(torch_device).eval() 
input_ids = input_ids[:2] input_ids[input_ids == 0] += 1 # first forward pass outputs = model(input_ids, use_cache=True) outputs_use_cache_conf = model(input_ids) outputs_no_past = model(input_ids, use_cache=False) self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) past_key_values = outputs["past_key_values"] # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1 # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) output_from_no_past = model(next_input_ids)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask, lm_labels = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask} return config, inputs_dict @require_torch class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else () all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else () pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {} fx_compatible = True test_pruning = False def setUp(self): self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False) self.config_tester = ConfigTester(self, config_class=TrOCRConfig) @unittest.skip(reason="Not yet implemented") def test_inputs_embeds(self): pass @unittest.skip(reason="trocr has no base model") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="trocr has no base model") def test_save_load_fast_init_to_base(self): pass def test_config(self): self.config_tester.run_common_tests() def test_decoder_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*config_and_inputs) @unittest.skip(reason="Decoder cannot keep gradients") def test_retain_grad_hidden_states_attentions(self): return @unittest.skip(reason="The model doesn't support left padding") # and it's not used enough to be worth fixing :) def test_left_padding_compatibility(self): pass
transformers/tests/models/trocr/test_modeling_trocr.py/0
{ "file_path": "transformers/tests/models/trocr/test_modeling_trocr.py", "repo_id": "transformers", "token_count": 3182 }
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import itertools import os import random import tempfile import unittest import numpy as np from datasets import Audio, load_dataset from transformers import UnivNetFeatureExtractor from transformers.testing_utils import check_json_file_has_correct_format, require_torch, slow from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch global_rng = random.Random() # Copied from tests.models.whisper.test_feature_extraction_whisper.floats_list def floats_list(shape, scale=1.0, rng=None, name=None): """Creates a random float32 tensor""" if rng is None: rng = global_rng values = [] for batch_idx in range(shape[0]): values.append([]) for _ in range(shape[1]): values[-1].append(rng.random() * scale) return values class UnivNetFeatureExtractionTester: def __init__( self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=1, sampling_rate=24000, padding_value=0.0, do_normalize=True, num_mel_bins=100, hop_length=256, win_length=1024, win_function="hann_window", filter_length=1024, max_length_s=10, fmin=0.0, fmax=12000, mel_floor=1e-9, center=False, compression_factor=1.0, compression_clip_val=1e-5, normalize_min=-11.512925148010254, normalize_max=2.3143386840820312, model_in_channels=64, pad_end_length=10, ): self.parent = parent self.batch_size = batch_size self.min_seq_length = min_seq_length self.max_seq_length = max_seq_length self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) self.feature_size = feature_size self.sampling_rate = sampling_rate self.padding_value = padding_value self.do_normalize = do_normalize self.num_mel_bins = num_mel_bins self.hop_length = hop_length self.win_length = win_length self.win_function = win_function self.filter_length = filter_length self.max_length_s = max_length_s self.fmin = fmin self.fmax = fmax self.mel_floor = mel_floor self.center = center self.compression_factor = compression_factor self.compression_clip_val = compression_clip_val self.normalize_min = normalize_min self.normalize_max = normalize_max self.model_in_channels = model_in_channels self.pad_end_length = pad_end_length def prepare_feat_extract_dict(self): return { "feature_size": self.feature_size, "sampling_rate": self.sampling_rate, "padding_value": self.padding_value, "do_normalize": self.do_normalize, "num_mel_bins": self.num_mel_bins, "hop_length": self.hop_length, "win_length": self.win_length, "win_function": self.win_function, "filter_length": self.filter_length, "max_length_s": self.max_length_s, "fmin": self.fmin, "fmax": self.fmax, "mel_floor": self.mel_floor, "center": self.center, "compression_factor": self.compression_factor, "compression_clip_val": self.compression_clip_val, "normalize_min": self.normalize_min, "normalize_max": self.normalize_max, "model_in_channels": self.model_in_channels, 
"pad_end_length": self.pad_end_length, } def prepare_inputs_for_common(self, equal_length=False, numpify=False): def _flatten(list_of_lists): return list(itertools.chain(*list_of_lists)) if equal_length: speech_inputs = floats_list((self.batch_size, self.max_seq_length)) else: # make sure that inputs increase in size speech_inputs = [ _flatten(floats_list((x, self.feature_size))) for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff) ] if numpify: speech_inputs = [np.asarray(x) for x in speech_inputs] return speech_inputs class UnivNetFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase): feature_extraction_class = UnivNetFeatureExtractor def setUp(self): self.feat_extract_tester = UnivNetFeatureExtractionTester(self) # Copied from tests.models.whisper.test_feature_extraction_whisper.WhisperFeatureExtractionTest.test_feat_extract_from_and_save_pretrained def test_feat_extract_from_and_save_pretrained(self): feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict) with tempfile.TemporaryDirectory() as tmpdirname: saved_file = feat_extract_first.save_pretrained(tmpdirname)[0] check_json_file_has_correct_format(saved_file) feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname) dict_first = feat_extract_first.to_dict() dict_second = feat_extract_second.to_dict() mel_1 = feat_extract_first.mel_filters mel_2 = feat_extract_second.mel_filters self.assertTrue(np.allclose(mel_1, mel_2)) self.assertEqual(dict_first, dict_second) # Copied from tests.models.whisper.test_feature_extraction_whisper.WhisperFeatureExtractionTest.test_feat_extract_to_json_file def test_feat_extract_to_json_file(self): feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict) with tempfile.TemporaryDirectory() as tmpdirname: json_file_path = os.path.join(tmpdirname, "feat_extract.json") feat_extract_first.to_json_file(json_file_path) feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path) dict_first = feat_extract_first.to_dict() dict_second = feat_extract_second.to_dict() mel_1 = feat_extract_first.mel_filters mel_2 = feat_extract_second.mel_filters self.assertTrue(np.allclose(mel_1, mel_2)) self.assertEqual(dict_first, dict_second) def test_call(self): # Tests that all call wrap to encode_plus and batch_encode_plus feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) # create three inputs of length 800, 1000, and 1200 speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs] # Test feature size input_features = feature_extractor( np_speech_inputs, padding="max_length", max_length=1600, return_tensors="np" ).input_features self.assertTrue(input_features.ndim == 3) # Note: for some reason I get a weird padding error when feature_size > 1 # self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size) # Note: we use the shape convention (batch_size, seq_len, num_mel_bins) self.assertTrue(input_features.shape[-1] == feature_extractor.num_mel_bins) # Test not batched input encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3)) # Test batched encoded_sequences_1 = feature_extractor(speech_inputs, 
return_tensors="np").input_features encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) # Test 2-D numpy arrays are batched. speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)] np_speech_inputs = np.asarray(speech_inputs) encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) # Test truncation required speech_inputs = [ floats_list((1, x))[0] for x in range((feature_extractor.num_max_samples - 100), (feature_extractor.num_max_samples + 500), 200) ] np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs] speech_inputs_truncated = [x[: feature_extractor.num_max_samples] for x in speech_inputs] np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated] encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) def test_batched_unbatched_consistency(self): feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) speech_inputs = floats_list((1, 800))[0] np_speech_inputs = np.asarray(speech_inputs) # Test unbatched vs batched list encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features encoded_sequences_2 = feature_extractor([speech_inputs], return_tensors="np").input_features for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) # Test np.ndarray vs List[np.ndarray] encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features encoded_sequences_2 = feature_extractor([np_speech_inputs], return_tensors="np").input_features for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) # Test unbatched np.ndarray vs batched np.ndarray encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features encoded_sequences_2 = feature_extractor( np.expand_dims(np_speech_inputs, axis=0), return_tensors="np" ).input_features for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) def test_generate_noise(self): feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] features = feature_extractor(speech_inputs, return_noise=True) input_features = features.input_features noise_features = features.noise_sequence for spectrogram, noise in zip(input_features, noise_features): self.assertEqual(spectrogram.shape[0], noise.shape[0]) def test_pad_end(self): feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] input_features1 = feature_extractor(speech_inputs, padding=False, 
pad_end=False).input_features input_features2 = feature_extractor(speech_inputs, padding=False, pad_end=True).input_features for spectrogram1, spectrogram2 in zip(input_features1, input_features2): self.assertEqual(spectrogram1.shape[0] + self.feat_extract_tester.pad_end_length, spectrogram2.shape[0]) def test_generate_noise_and_pad_end(self): feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] features = feature_extractor(speech_inputs, padding=False, return_noise=True, pad_end=True) input_features = features.input_features noise_features = features.noise_sequence for spectrogram, noise in zip(input_features, noise_features): self.assertEqual(spectrogram.shape[0], noise.shape[0]) @require_torch def test_batch_decode(self): import torch feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) input_lengths = list(range(800, 1400, 200)) pad_samples = feature_extractor.pad_end_length * feature_extractor.hop_length output_features = { "waveforms": torch.tensor(floats_list((3, max(input_lengths) + pad_samples))), "waveform_lengths": torch.tensor(input_lengths), } waveforms = feature_extractor.batch_decode(**output_features) for input_length, waveform in zip(input_lengths, waveforms): self.assertTrue(len(waveform.shape) == 1, msg="Individual output waveforms should be 1D") self.assertEqual(waveform.shape[0], input_length) @require_torch # Copied from tests.models.whisper.test_feature_extraction_whisper.WhisperFeatureExtractionTest.test_double_precision_pad def test_double_precision_pad(self): import torch feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) np_speech_inputs = np.random.rand(100, 32).astype(np.float64) py_speech_inputs = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np") self.assertTrue(np_processed.input_features.dtype == np.float32) pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt") self.assertTrue(pt_processed.input_features.dtype == torch.float32) def _load_datasamples(self, num_samples): ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") ds = ds.cast_column("audio", Audio(sampling_rate=self.feat_extract_tester.sampling_rate)) # automatic decoding with librispeech speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"] return [x["array"] for x in speech_samples], [x["sampling_rate"] for x in speech_samples] @slow @require_torch def test_integration(self): # fmt: off EXPECTED_INPUT_FEATURES = torch.tensor( [ -5.0229, -6.1358, -5.8346, -5.4447, -5.6707, -5.8577, -5.0464, -5.0058, -5.6015, -5.6410, -5.4325, -5.6116, -5.3700, -5.7956, -5.3196, -5.3274, -5.9655, -5.6057, -5.8382, -5.9602, -5.9005, -5.9123, -5.7669, -6.1441, -5.5168, -5.1405, -5.3927, -6.0032, -5.5784, -5.3728 ], ) # fmt: on input_speech, sr = self._load_datasamples(1) feature_extractor = UnivNetFeatureExtractor() input_features = feature_extractor(input_speech, sampling_rate=sr[0], return_tensors="pt").input_features self.assertEqual(input_features.shape, (1, 548, 100)) input_features_mean = torch.mean(input_features) input_features_stddev = torch.std(input_features) EXPECTED_MEAN = torch.tensor(-6.18862009) EXPECTED_STDDEV = torch.tensor(2.80845642) torch.testing.assert_close(input_features_mean, EXPECTED_MEAN, rtol=5e-5, atol=5e-5) 
torch.testing.assert_close(input_features_stddev, EXPECTED_STDDEV)
torch.testing.assert_close(input_features[0, :30, 0], EXPECTED_INPUT_FEATURES, rtol=1e-4, atol=1e-4)
transformers/tests/models/univnet/test_feature_extraction_univnet.py/0
{ "file_path": "transformers/tests/models/univnet/test_feature_extraction_univnet.py", "repo_id": "transformers", "token_count": 7239 }
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch VITS model.""" import copy import os import tempfile import unittest from typing import Dict, List, Tuple import numpy as np from transformers import PretrainedConfig, VitsConfig from transformers.testing_utils import ( is_flaky, is_torch_available, require_torch, require_torch_fp16, require_torch_multi_gpu, slow, torch_device, ) from transformers.trainer_utils import set_seed from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, global_rng, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import VitsModel, VitsTokenizer CONFIG_NAME = "config.json" GENERATION_CONFIG_NAME = "generation_config.json" def _config_zero_init(config): configs_no_init = copy.deepcopy(config) for key in configs_no_init.__dict__.keys(): if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key: setattr(configs_no_init, key, 1e-10) if isinstance(getattr(configs_no_init, key, None), PretrainedConfig): no_init_subconfig = _config_zero_init(getattr(configs_no_init, key)) setattr(configs_no_init, key, no_init_subconfig) return configs_no_init @require_torch class VitsModelTester: def __init__( self, parent, batch_size=2, seq_length=7, is_training=False, hidden_size=16, num_hidden_layers=2, num_attention_heads=2, intermediate_size=64, flow_size=16, vocab_size=38, spectrogram_bins=8, duration_predictor_num_flows=2, duration_predictor_filter_channels=16, prior_encoder_num_flows=2, upsample_initial_channel=16, upsample_rates=[8, 2], upsample_kernel_sizes=[16, 4], resblock_kernel_sizes=[3, 7], resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]], ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.flow_size = flow_size self.vocab_size = vocab_size self.spectrogram_bins = spectrogram_bins self.duration_predictor_num_flows = duration_predictor_num_flows self.duration_predictor_filter_channels = duration_predictor_filter_channels self.prior_encoder_num_flows = prior_encoder_num_flows self.upsample_initial_channel = upsample_initial_channel self.upsample_rates = upsample_rates self.upsample_kernel_sizes = upsample_kernel_sizes self.resblock_kernel_sizes = resblock_kernel_sizes self.resblock_dilation_sizes = resblock_dilation_sizes def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).clamp(2) attention_mask = random_attention_mask([self.batch_size, self.seq_length]) config = self.get_config() inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, } return config, inputs_dict def 
prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def get_config(self): return VitsConfig( hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, ffn_dim=self.intermediate_size, flow_size=self.flow_size, vocab_size=self.vocab_size, spectrogram_bins=self.spectrogram_bins, duration_predictor_num_flows=self.duration_predictor_num_flows, prior_encoder_num_flows=self.prior_encoder_num_flows, duration_predictor_filter_channels=self.duration_predictor_filter_channels, posterior_encoder_num_wavenet_layers=self.num_hidden_layers, upsample_initial_channel=self.upsample_initial_channel, upsample_rates=self.upsample_rates, upsample_kernel_sizes=self.upsample_kernel_sizes, resblock_kernel_sizes=self.resblock_kernel_sizes, resblock_dilation_sizes=self.resblock_dilation_sizes, ) def create_and_check_model_forward(self, config, inputs_dict): model = VitsModel(config=config).to(torch_device).eval() input_ids = inputs_dict["input_ids"] attention_mask = inputs_dict["attention_mask"] result = model(input_ids, attention_mask=attention_mask) self.parent.assertEqual((self.batch_size, 624), result.waveform.shape) @require_torch class VitsModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (VitsModel,) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": VitsModel, "text-to-audio": VitsModel} if is_torch_available() else {} ) is_encoder_decoder = False test_pruning = False test_headmasking = False test_resize_embeddings = False test_head_masking = False test_torchscript = False has_attentions = False def setUp(self): self.model_tester = VitsModelTester(self) self.config_tester = ConfigTester(self, config_class=VitsConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() # TODO: @ydshieh @is_flaky(description="torch 2.2.0 gives `Timeout >120.0s`") def test_pipeline_feature_extraction(self): super().test_pipeline_feature_extraction() @is_flaky(description="torch 2.2.0 gives `Timeout >120.0s`") def test_pipeline_feature_extraction_fp16(self): super().test_pipeline_feature_extraction_fp16() @unittest.skip(reason="Need to fix this after #26538") def test_model_forward(self): set_seed(12345) global_rng.seed(12345) config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_forward(*config_and_inputs) @require_torch_multi_gpu # override to force all elements of the batch to have the same sequence length across GPUs def test_multi_gpu_data_parallel_forward(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.use_stochastic_duration_prediction = False # move input tensors to cuda:O for key, value in inputs_dict.items(): if torch.is_tensor(value): # make all elements of the batch the same -> ensures the output seq lengths are the same for DP value[1:] = value[0] inputs_dict[key] = value.to(0) for model_class in self.all_model_classes: model = model_class(config=config) model.to(0) model.eval() # Wrap model in nn.DataParallel model = torch.nn.DataParallel(model) set_seed(555) with torch.no_grad(): _ = model(**self._prepare_for_class(inputs_dict, model_class)).waveform @unittest.skip(reason="VITS is not deterministic") def test_determinism(self): pass @unittest.skip(reason="VITS is not deterministic") def test_batching_equivalence(self): pass @is_flaky( max_attempts=3, description="Weight initialisation for the 
VITS conv layers sometimes exceeds the kaiming normal range", ) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() uniform_init_parms = [ "emb_rel_k", "emb_rel_v", "conv_1", "conv_2", "conv_pre", "conv_post", "conv_proj", "conv_dds", "project", "wavenet.in_layers", "wavenet.res_skip_layers", "upsampler", "resblocks", ] configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: if any(x in name for x in uniform_init_parms): self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) @unittest.skip(reason="VITS has no inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="VITS has no input embeddings") def test_model_get_set_embeddings(self): pass # override since the model is not deterministic, so we need to set the seed for each forward pass def test_model_outputs_equivalence(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(t): t[t != t] = 0 return t def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}): with torch.no_grad(): set_seed(0) tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs) set_seed(0) dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple() def recursive_check(tuple_object, dict_object): if isinstance(tuple_object, (List, Tuple)): for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object): recursive_check(tuple_iterable_value, dict_iterable_value) elif isinstance(tuple_object, Dict): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values(), dict_object.values() ): recursive_check(tuple_iterable_value, dict_iterable_value) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5 ), msg=( "Tuple and dict output are not equal. Difference:" f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:" f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has" f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}." 
), ) recursive_check(tuple_output, dict_output) for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) if self.has_attentions: tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True}) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True}) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence( model, tuple_inputs, dict_inputs, {"output_hidden_states": True, "output_attentions": True} ) # override since the model is not deterministic, so we need to set the seed for each forward pass def test_save_load(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def check_save_load(out1, out2): # make sure we don't have nans out_2 = out2.cpu().numpy() out_2[np.isnan(out_2)] = 0 out_1 = out1.cpu().numpy() out_1[np.isnan(out_1)] = 0 max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): set_seed(0) first = model(**self._prepare_for_class(inputs_dict, model_class))[0] with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) # the config file (and the generation config file, if it can generate) should be saved self.assertTrue(os.path.exists(os.path.join(tmpdirname, CONFIG_NAME))) self.assertEqual( model.can_generate(), os.path.exists(os.path.join(tmpdirname, GENERATION_CONFIG_NAME)) ) model = model_class.from_pretrained(tmpdirname) model.to(torch_device) with torch.no_grad(): set_seed(0) second = model(**self._prepare_for_class(inputs_dict, model_class))[0] if isinstance(first, tuple) and isinstance(second, tuple): for tensor1, tensor2 in zip(first, second): check_save_load(tensor1, tensor2) else: check_save_load(first, second) # overwrite from test_modeling_common def _mock_init_weights(self, module): if hasattr(module, "weight") and module.weight is not None: module.weight.data.fill_(3) if hasattr(module, "weight_g") and module.weight_g is not None: module.weight_g.data.fill_(3) if hasattr(module, "weight_v") and module.weight_v is not None: module.weight_v.data.fill_(3) if hasattr(module, "bias") and module.bias is not 
None: module.bias.data.fill_(3) @require_torch @slow class VitsModelIntegrationTests(unittest.TestCase): def test_forward(self): # GPU gives different results than CPU torch_device = "cpu" model = VitsModel.from_pretrained("facebook/mms-tts-eng") model.to(torch_device) tokenizer = VitsTokenizer.from_pretrained("facebook/mms-tts-eng") set_seed(555) # make deterministic input_text = "Mister quilter is the apostle of the middle classes and we are glad to welcome his gospel!" input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to(torch_device) with torch.no_grad(): outputs = model(input_ids) self.assertEqual(outputs.waveform.shape, (1, 87040)) # fmt: off EXPECTED_LOGITS = torch.tensor( [ -0.0042, 0.0176, 0.0354, 0.0504, 0.0621, 0.0777, 0.0980, 0.1224, 0.1475, 0.1679, 0.1817, 0.1832, 0.1713, 0.1542, 0.1384, 0.1256, 0.1147, 0.1066, 0.1026, 0.0958, 0.0823, 0.0610, 0.0340, 0.0022, -0.0337, -0.0677, -0.0969, -0.1178, -0.1311, -0.1363 ] ) # fmt: on torch.testing.assert_close(outputs.waveform[0, 10000:10030].cpu(), EXPECTED_LOGITS, rtol=1e-4, atol=1e-4) @require_torch_fp16 def test_forward_fp16(self): # GPU gives different results than CPU torch_device = "cpu" model = VitsModel.from_pretrained("facebook/mms-tts-eng", torch_dtype=torch.float16) model.to(torch_device) tokenizer = VitsTokenizer.from_pretrained("facebook/mms-tts-eng") set_seed(555) # make deterministic input_text = "Mister quilter is the apostle of the middle classes and we are glad to welcome his gospel!" input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to(torch_device) with torch.no_grad(): outputs = model(input_ids) self.assertEqual(outputs.waveform.shape, (1, 87040)) # fmt: off EXPECTED_LOGITS = torch.tensor( [ 0.0101, 0.0318, 0.0489, 0.0627, 0.0728, 0.0865, 0.1053, 0.1279, 0.1514, 0.1703, 0.1827, 0.1829, 0.1694, 0.1509, 0.1332, 0.1188, 0.1066, 0.0978, 0.0936, 0.0867, 0.0724, 0.0493, 0.0197, -0.0141, -0.0501, -0.0817, -0.1065, -0.1223, -0.1311, -0.1339 ] ).to(torch.float16) # fmt: on torch.testing.assert_close(outputs.waveform[0, 10000:10030].cpu(), EXPECTED_LOGITS, rtol=1e-4, atol=1e-4)
transformers/tests/models/vits/test_modeling_vits.py/0
{ "file_path": "transformers/tests/models/vits/test_modeling_vits.py", "repo_id": "transformers", "token_count": 9245 }
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Wav2Vec2-Conformer model.""" import math import tempfile import unittest import numpy as np from datasets import load_dataset from transformers import Wav2Vec2ConformerConfig, is_torch_available from transformers.testing_utils import ( is_flaky, is_pt_flax_cross_test, require_torch, require_torch_accelerator, require_torch_fp16, slow, torch_device, ) from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( Wav2Vec2ConformerForAudioFrameClassification, Wav2Vec2ConformerForCTC, Wav2Vec2ConformerForPreTraining, Wav2Vec2ConformerForSequenceClassification, Wav2Vec2ConformerForXVector, Wav2Vec2ConformerModel, Wav2Vec2FeatureExtractor, Wav2Vec2Processor, ) from transformers.models.wav2vec2_conformer.modeling_wav2vec2_conformer import ( Wav2Vec2ConformerGumbelVectorQuantizer, _compute_mask_indices, _sample_negative_indices, ) class Wav2Vec2ConformerModelTester: def __init__( self, parent, batch_size=13, seq_length=1024, # speech is longer is_training=False, hidden_size=16, feat_extract_norm="group", feat_extract_dropout=0.0, feat_extract_activation="gelu", conv_dim=(32, 32, 32), conv_stride=(4, 4, 4), conv_kernel=(8, 8, 8), conv_bias=False, num_conv_pos_embeddings=16, num_conv_pos_embedding_groups=2, num_hidden_layers=2, num_attention_heads=2, hidden_dropout_prob=0.1, intermediate_size=20, layer_norm_eps=1e-5, hidden_act="gelu", initializer_range=0.02, mask_time_prob=0.5, mask_time_length=2, vocab_size=32, do_stable_layer_norm=False, num_adapter_layers=1, adapter_stride=2, tdnn_dim=(32, 32), tdnn_kernel=(5, 3), tdnn_dilation=(1, 2), xvector_output_dim=32, position_embeddings_type="relative", scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.hidden_size = hidden_size self.feat_extract_norm = feat_extract_norm self.feat_extract_dropout = feat_extract_dropout self.feat_extract_activation = feat_extract_activation self.conv_dim = conv_dim self.conv_stride = conv_stride self.conv_kernel = conv_kernel self.conv_bias = conv_bias self.num_conv_pos_embeddings = num_conv_pos_embeddings self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_dropout_prob = hidden_dropout_prob self.intermediate_size = intermediate_size self.layer_norm_eps = layer_norm_eps self.hidden_act = hidden_act self.initializer_range = initializer_range self.vocab_size = vocab_size self.do_stable_layer_norm = do_stable_layer_norm self.num_adapter_layers = num_adapter_layers self.adapter_stride = adapter_stride self.mask_time_prob = mask_time_prob 
self.mask_time_length = mask_time_length self.scope = scope self.tdnn_dim = tdnn_dim self.tdnn_kernel = tdnn_kernel self.tdnn_dilation = tdnn_dilation self.xvector_output_dim = xvector_output_dim self.position_embeddings_type = position_embeddings_type output_seq_length = self.seq_length for kernel, stride in zip(self.conv_kernel, self.conv_stride): output_seq_length = (output_seq_length - (kernel - 1)) / stride self.output_seq_length = int(math.ceil(output_seq_length)) self.encoder_seq_length = self.output_seq_length self.adapter_output_seq_length = (self.output_seq_length - 1) // adapter_stride + 1 def prepare_config_and_inputs(self, position_embeddings_type="relative"): input_values = floats_tensor([self.batch_size, self.seq_length], self.vocab_size) attention_mask = random_attention_mask([self.batch_size, self.seq_length]) config = self.get_config(position_embeddings_type=position_embeddings_type) return config, input_values, attention_mask def get_config(self, position_embeddings_type="relative"): return Wav2Vec2ConformerConfig( hidden_size=self.hidden_size, feat_extract_norm=self.feat_extract_norm, feat_extract_dropout=self.feat_extract_dropout, feat_extract_activation=self.feat_extract_activation, conv_dim=self.conv_dim, conv_stride=self.conv_stride, conv_kernel=self.conv_kernel, conv_bias=self.conv_bias, mask_time_prob=self.mask_time_prob, mask_time_length=self.mask_time_length, num_conv_pos_embeddings=self.num_conv_pos_embeddings, num_conv_pos_embedding_groups=self.num_conv_pos_embedding_groups, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, hidden_dropout_prob=self.hidden_dropout_prob, intermediate_size=self.intermediate_size, layer_norm_eps=self.layer_norm_eps, do_stable_layer_norm=self.do_stable_layer_norm, hidden_act=self.hidden_act, initializer_range=self.initializer_range, vocab_size=self.vocab_size, num_adapter_layers=self.num_adapter_layers, adapter_stride=self.adapter_stride, tdnn_dim=self.tdnn_dim, tdnn_kernel=self.tdnn_kernel, tdnn_dilation=self.tdnn_dilation, xvector_output_dim=self.xvector_output_dim, position_embeddings_type=position_embeddings_type, ) def create_and_check_model(self, config, input_values, attention_mask): model = Wav2Vec2ConformerModel(config=config) model.to(torch_device) model.eval() result = model(input_values, attention_mask=attention_mask) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.output_seq_length, self.hidden_size) ) def create_and_check_model_with_adapter(self, config, input_values, attention_mask): config.add_adapter = True model = Wav2Vec2ConformerModel(config=config) model.to(torch_device) model.eval() result = model(input_values, attention_mask=attention_mask) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.adapter_output_seq_length, self.hidden_size) ) def create_and_check_model_with_adapter_for_ctc(self, config, input_values, attention_mask): config.add_adapter = True config.output_hidden_size = 2 * config.hidden_size model = Wav2Vec2ConformerForCTC(config=config) model.to(torch_device) model.eval() result = model(input_values, attention_mask=attention_mask) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.adapter_output_seq_length, self.vocab_size) ) def create_and_check_model_with_adapter_proj_dim(self, config, input_values, attention_mask): config.add_adapter = True config.output_hidden_size = 8 model = Wav2Vec2ConformerModel(config=config) model.to(torch_device) model.eval() result = model(input_values, 
attention_mask=attention_mask) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.adapter_output_seq_length, config.output_hidden_size), ) def create_and_check_model_float16(self, config, input_values, attention_mask): model = Wav2Vec2ConformerModel(config=config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model = Wav2Vec2ConformerModel.from_pretrained(tmpdirname, torch_dtype=torch.float16) model.to(torch_device) model.eval() with torch.no_grad(): result = model(input_values.type(dtype=torch.float16), attention_mask=attention_mask) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.output_seq_length, self.hidden_size) ) def create_and_check_batch_inference(self, config, input_values, *args): # test does not pass for models making use of `group_norm` # check: https://github.com/pytorch/fairseq/issues/3227 model = Wav2Vec2ConformerModel(config=config) model.to(torch_device) model.eval() input_values = input_values[:3] attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.bool) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0.0 batch_outputs = model(input_values, attention_mask=attention_mask).last_hidden_state for i in range(input_values.shape[0]): input_slice = input_values[i : i + 1, : input_lengths[i]] output = model(input_slice).last_hidden_state batch_output = batch_outputs[i : i + 1, : output.shape[1]] self.parent.assertTrue(torch.allclose(output, batch_output, atol=1e-3)) def check_ctc_loss(self, config, input_values, *args): model = Wav2Vec2ConformerForCTC(config=config) model.to(torch_device) # make sure that dropout is disabled model.eval() input_values = input_values[:3] attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_values.shape[0], min(max_length_labels) - 1), model.config.vocab_size) # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0 model.config.ctc_loss_reduction = "sum" sum_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item() model.config.ctc_loss_reduction = "mean" mean_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item() self.parent.assertTrue(isinstance(sum_loss, float)) self.parent.assertTrue(isinstance(mean_loss, float)) def check_seq_classifier_loss(self, config, input_values, *args): model = Wav2Vec2ConformerForSequenceClassification(config=config) model.to(torch_device) # make sure that dropout is disabled model.eval() input_values = input_values[:3] attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label)) # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0 masked_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item() unmasked_loss = model(input_values, labels=labels).loss.item() self.parent.assertTrue(isinstance(masked_loss, float)) 
self.parent.assertTrue(isinstance(unmasked_loss, float)) self.parent.assertTrue(masked_loss != unmasked_loss) def check_ctc_training(self, config, input_values, *args): config.ctc_zero_infinity = True model = Wav2Vec2ConformerForCTC(config=config) model.to(torch_device) model.train() # freeze feature encoder model.freeze_feature_encoder() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size) # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 if max_length_labels[i] < labels.shape[-1]: # it's important that we make sure that target lengths are at least # one shorter than logit lengths to prevent -inf labels[i, max_length_labels[i] - 1 :] = -100 loss = model(input_values, labels=labels).loss self.parent.assertFalse(torch.isinf(loss).item()) loss.backward() def check_seq_classifier_training(self, config, input_values, *args): config.ctc_zero_infinity = True model = Wav2Vec2ConformerForSequenceClassification(config=config) model.to(torch_device) model.train() # freeze everything but the classification head model.freeze_base_model() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label)) # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 loss = model(input_values, labels=labels).loss self.parent.assertFalse(torch.isinf(loss).item()) loss.backward() def check_xvector_training(self, config, input_values, *args): config.ctc_zero_infinity = True model = Wav2Vec2ConformerForXVector(config=config) model.to(torch_device) model.train() # freeze everything but the classification head model.freeze_base_model() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label)) # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 loss = model(input_values, labels=labels).loss self.parent.assertFalse(torch.isinf(loss).item()) loss.backward() def check_labels_out_of_vocab(self, config, input_values, *args): model = Wav2Vec2ConformerForCTC(config) model.to(torch_device) model.train() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size + 100) with self.parent.assertRaises(ValueError): model(input_values, labels=labels) def prepare_config_and_inputs_for_common(self): config, input_values, attention_mask = self.prepare_config_and_inputs() inputs_dict = {"input_values": input_values, "attention_mask": attention_mask} return config, inputs_dict @require_torch class Wav2Vec2ConformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( Wav2Vec2ConformerForCTC, Wav2Vec2ConformerModel, Wav2Vec2ConformerForSequenceClassification, Wav2Vec2ConformerForPreTraining, Wav2Vec2ConformerForAudioFrameClassification, Wav2Vec2ConformerForXVector, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "audio-classification": Wav2Vec2ConformerForSequenceClassification, "automatic-speech-recognition": 
Wav2Vec2ConformerForCTC, "feature-extraction": Wav2Vec2ConformerModel, } if is_torch_available() else {} ) test_pruning = False test_headmasking = False test_torchscript = False def setUp(self): self.model_tester = Wav2Vec2ConformerModelTester(self) self.config_tester = ConfigTester(self, config_class=Wav2Vec2ConformerConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @is_flaky( description="The `codevector_idx` computed with `argmax()` in `Wav2Vec2ConformerGumbelVectorQuantizer.forward` is not stable." ) def test_batching_equivalence(self): super().test_batching_equivalence() def test_model_with_relative(self): config_and_inputs = self.model_tester.prepare_config_and_inputs(position_embeddings_type="relative") self.model_tester.create_and_check_model(*config_and_inputs) def test_model_with_rotary(self): config_and_inputs = self.model_tester.prepare_config_and_inputs(position_embeddings_type="rotary") self.model_tester.create_and_check_model(*config_and_inputs) def test_model_with_no_rel_pos(self): config_and_inputs = self.model_tester.prepare_config_and_inputs(position_embeddings_type=None) self.model_tester.create_and_check_model(*config_and_inputs) def test_model_with_adapter(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_with_adapter(*config_and_inputs) def test_model_with_adapter_for_ctc(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_with_adapter_for_ctc(*config_and_inputs) def test_model_with_adapter_proj_dim(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_with_adapter_proj_dim(*config_and_inputs) @require_torch_accelerator @require_torch_fp16 def test_model_float16_with_relative(self): config_and_inputs = self.model_tester.prepare_config_and_inputs(position_embeddings_type="relative") self.model_tester.create_and_check_model_float16(*config_and_inputs) @require_torch_accelerator @require_torch_fp16 def test_model_float16_with_rotary(self): config_and_inputs = self.model_tester.prepare_config_and_inputs(position_embeddings_type="rotary") self.model_tester.create_and_check_model_float16(*config_and_inputs) def test_ctc_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_loss(*config_and_inputs) def test_seq_classifier_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_seq_classifier_loss(*config_and_inputs) def test_ctc_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_training(*config_and_inputs) def test_seq_classifier_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_seq_classifier_training(*config_and_inputs) def test_xvector_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_xvector_training(*config_and_inputs) def test_labels_out_of_vocab(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_labels_out_of_vocab(*config_and_inputs) @unittest.skip(reason="Wav2Vec2Conformer has not inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="Wav2Vec2Conformer has 
input_values instead of input_ids") def test_forward_signature(self): pass @unittest.skip(reason="Wav2Vec2Conformer has not token embeddings") def test_resize_tokens_embeddings(self): pass @unittest.skip(reason="Wav2Vec2Conformer has not inputs_embeds") def test_model_get_set_embeddings(self): pass @is_pt_flax_cross_test @unittest.skip(reason="Non-robust architecture does not exist in Flax") def test_equivalence_flax_to_pt(self): pass @is_pt_flax_cross_test @unittest.skip(reason="Non-robust architecture does not exist in Flax") def test_equivalence_pt_to_flax(self): pass def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True # no need to test all models as different heads yield the same functionality model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) # set layer drop to 0 model.config.layerdrop = 0.0 input_values = inputs_dict["input_values"] input_lengths = torch.tensor( [input_values.shape[1] for _ in range(input_values.shape[0])], dtype=torch.long, device=torch_device ) output_lengths = model._get_feat_extract_output_lengths(input_lengths) labels = ids_tensor((input_values.shape[0], output_lengths[0] - 2), self.model_tester.vocab_size) inputs_dict["attention_mask"] = torch.ones_like(inputs_dict["attention_mask"]) inputs_dict["labels"] = labels outputs = model(**inputs_dict) output = outputs[0] # Encoder-/Decoder-only models hidden_states = outputs.hidden_states[0] attentions = outputs.attentions[0] hidden_states.retain_grad() attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(hidden_states.grad) self.assertIsNotNone(attentions.grad) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): uniform_init_parms = [ "conv.weight", "conv.parametrizations.weight", "masked_spec_embed", "codevectors", "quantizer.weight_proj.weight", "project_hid.weight", "project_hid.bias", "project_q.weight", "project_q.bias", "pos_bias_v", "pos_bias_u", "pointwise_conv1", "pointwise_conv2", "feature_projection.projection.weight", "feature_projection.projection.bias", "objective.weight", ] if param.requires_grad: if any(x in name for x in uniform_init_parms): self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) # overwrite from test_modeling_common def _mock_init_weights(self, module): if hasattr(module, "weight") and module.weight is not None: module.weight.data.fill_(3) if hasattr(module, "weight_g") and module.weight_g is not None: module.weight_g.data.fill_(3) if hasattr(module, "weight_v") and module.weight_v is not None: module.weight_v.data.fill_(3) if hasattr(module, "bias") and module.bias is not None: module.bias.data.fill_(3) if hasattr(module, "pos_bias_u") and module.pos_bias_u is not None: module.pos_bias_u.data.fill_(3) if hasattr(module, "pos_bias_v") and module.pos_bias_v is not None: module.pos_bias_v.data.fill_(3) if hasattr(module, "codevectors") and module.codevectors is not 
None: module.codevectors.data.fill_(3) if hasattr(module, "masked_spec_embed") and module.masked_spec_embed is not None: module.masked_spec_embed.data.fill_(3) def test_mask_feature_prob_ctc(self): model = Wav2Vec2ConformerForCTC.from_pretrained( "hf-internal-testing/tiny-random-wav2vec2-conformer", mask_feature_prob=0.2, mask_feature_length=2 ) model.to(torch_device).train() processor = Wav2Vec2Processor.from_pretrained( "hf-internal-testing/tiny-random-wav2vec2-conformer", return_attention_mask=True ) batch_duration_in_seconds = [1, 3, 2, 6] input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds] batch = processor( input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt" ) logits = model( input_values=batch["input_values"].to(torch_device), attention_mask=batch["attention_mask"].to(torch_device), ).logits self.assertEqual(logits.shape, (4, 1498, 32)) def test_mask_time_prob_ctc(self): model = Wav2Vec2ConformerForCTC.from_pretrained( "hf-internal-testing/tiny-random-wav2vec2-conformer", mask_time_prob=0.2, mask_time_length=2 ) model.to(torch_device).train() processor = Wav2Vec2Processor.from_pretrained( "hf-internal-testing/tiny-random-wav2vec2-conformer", return_attention_mask=True ) batch_duration_in_seconds = [1, 3, 2, 6] input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds] batch = processor( input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt" ) logits = model( input_values=batch["input_values"].to(torch_device), attention_mask=batch["attention_mask"].to(torch_device), ).logits self.assertEqual(logits.shape, (4, 1498, 32)) @unittest.skip(reason="Feed forward chunking is not implemented") def test_feed_forward_chunking(self): pass @slow def test_model_from_pretrained(self): model = Wav2Vec2ConformerModel.from_pretrained("facebook/wav2vec2-conformer-rel-pos-large") self.assertIsNotNone(model) @require_torch class Wav2Vec2ConformerUtilsTest(unittest.TestCase): def test_compute_mask_indices(self): batch_size = 4 sequence_length = 60 mask_prob = 0.5 mask_length = 1 mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length) mask = torch.from_numpy(mask).to(torch_device) self.assertListEqual(mask.sum(axis=-1).tolist(), [mask_prob * sequence_length for _ in range(batch_size)]) def test_compute_mask_indices_low_prob(self): # with these settings num_masked_spans=0.5, which means probabilistic rounding # ensures that in 5 out of 10 method calls, num_masked_spans=0, and in # the other 5 out of 10, cases num_masked_spans=1 n_trials = 100 batch_size = 4 sequence_length = 100 mask_prob = 0.05 mask_length = 10 count_dimensions_masked = 0 count_dimensions_not_masked = 0 for _ in range(n_trials): mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length) mask = torch.from_numpy(mask).to(torch_device) num_masks = torch.sum(mask).item() if num_masks > 0: count_dimensions_masked += 1 else: count_dimensions_not_masked += 1 # as we test for at least 10 masked dimension and at least # 10 non-masked dimension, this test could fail with probability: # P(100 coin flips, at most 9 heads) = 1.66e-18 self.assertGreater(count_dimensions_masked, int(n_trials * 0.1)) self.assertGreater(count_dimensions_not_masked, int(n_trials * 0.1)) def test_compute_mask_indices_overlap(self): batch_size = 4 sequence_length = 80 mask_prob = 0.5 mask_length = 4 mask = _compute_mask_indices((batch_size, sequence_length), 
mask_prob, mask_length) mask = torch.from_numpy(mask).to(torch_device) # because of overlap mask don't have to add up exactly to `mask_prob * sequence_length`, but have to be smaller or equal for batch_sum in mask.sum(axis=-1): self.assertTrue(int(batch_sum) <= mask_prob * sequence_length) def test_compute_mask_indices_attn_mask_overlap(self): batch_size = 4 sequence_length = 80 mask_prob = 0.5 mask_length = 4 attention_mask = torch.ones((batch_size, sequence_length), dtype=torch.long, device=torch_device) attention_mask[:2, sequence_length // 2 :] = 0 mask = _compute_mask_indices( (batch_size, sequence_length), mask_prob, mask_length, attention_mask=attention_mask ) mask = torch.from_numpy(mask).to(torch_device) for batch_sum in mask.sum(axis=-1): self.assertTrue(int(batch_sum) <= mask_prob * sequence_length) self.assertTrue(mask[:2, sequence_length // 2 :].sum() == 0) def test_compute_mask_indices_short_audio(self): batch_size = 4 sequence_length = 100 mask_prob = 0.05 mask_length = 10 attention_mask = torch.ones((batch_size, sequence_length), dtype=torch.long, device=torch_device) # force one example to be heavily padded attention_mask[0, 5:] = 0 mask = _compute_mask_indices( (batch_size, sequence_length), mask_prob, mask_length, attention_mask=attention_mask, min_masks=2 ) # make sure that non-padded examples cannot be padded self.assertFalse(mask[0][attention_mask[0].to(torch.bool).cpu()].any()) def test_compute_perplexity(self): probs = torch.arange(100, device=torch_device).reshape(2, 5, 10) / 100 ppl = Wav2Vec2ConformerGumbelVectorQuantizer._compute_perplexity(probs) self.assertTrue(abs(ppl.item() - 141.4291) < 1e-3) # mask half of the input mask = torch.ones((2,), device=torch_device, dtype=torch.bool) mask[0] = 0 ppl = Wav2Vec2ConformerGumbelVectorQuantizer._compute_perplexity(probs, mask) self.assertTrue(abs(ppl.item() - 58.6757) < 1e-3) def test_sample_negatives(self): batch_size = 2 sequence_length = 10 hidden_size = 4 num_negatives = 3 features = (torch.arange(sequence_length * hidden_size, device=torch_device) // hidden_size).view( sequence_length, hidden_size ) # each value in vector consits of same value features = features[None, :].expand(batch_size, sequence_length, hidden_size).contiguous() # sample negative indices sampled_negative_indices = _sample_negative_indices((batch_size, sequence_length), num_negatives, None) sampled_negative_indices = torch.from_numpy(sampled_negative_indices).to(torch_device) negatives = features.view(-1, hidden_size)[sampled_negative_indices.long().view(-1)] negatives = negatives.view(batch_size, sequence_length, -1, hidden_size).permute(2, 0, 1, 3) self.assertTrue(negatives.shape == (num_negatives, batch_size, sequence_length, hidden_size)) # make sure no negatively sampled vector is actually a positive one for negative in negatives: self.assertTrue(((negative - features) == 0).sum() == 0.0) # make sure that full vectors are sampled and not values of vectors => this means that `unique()` yields a single value for `hidden_size` dim self.assertTrue(negatives.unique(dim=-1).shape, (num_negatives, batch_size, sequence_length, 1)) def test_sample_negatives_with_mask(self): batch_size = 2 sequence_length = 10 hidden_size = 4 num_negatives = 3 # second half of last input tensor is padded mask = torch.ones((batch_size, sequence_length), dtype=torch.long, device=torch_device) mask[-1, sequence_length // 2 :] = 0 features = (torch.arange(sequence_length * hidden_size, device=torch_device) // hidden_size).view( sequence_length, hidden_size ) # each 
value in vector consits of same value features = features[None, :].expand(batch_size, sequence_length, hidden_size).contiguous() # replace masked feature vectors with -100 to test that those are not sampled features = torch.where(mask[:, :, None].expand(features.shape).bool(), features, -100) # sample negative indices sampled_negative_indices = _sample_negative_indices( (batch_size, sequence_length), num_negatives, mask.cpu().numpy() ) sampled_negative_indices = torch.from_numpy(sampled_negative_indices).to(torch_device) negatives = features.view(-1, hidden_size)[sampled_negative_indices.long().view(-1)] negatives = negatives.view(batch_size, sequence_length, -1, hidden_size).permute(2, 0, 1, 3) self.assertTrue((negatives >= 0).all().item()) self.assertTrue(negatives.shape == (num_negatives, batch_size, sequence_length, hidden_size)) # make sure no negatively sampled vector is actually a positive one for negative in negatives: self.assertTrue(((negative - features) == 0).sum() == 0.0) # make sure that full vectors are sampled and not values of vectors => this means that `unique()` yields a single value for `hidden_size` dim self.assertTrue(negatives.unique(dim=-1).shape, (num_negatives, batch_size, sequence_length, 1)) @require_torch @slow class Wav2Vec2ConformerModelIntegrationTest(unittest.TestCase): def _load_datasamples(self, num_samples): ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") # automatic decoding with librispeech speech_samples = ds.sort("id").filter(lambda x: x["id"] in [f"1272-141231-000{i}" for i in range(num_samples)]) speech_samples = speech_samples[:num_samples]["audio"] return [x["array"] for x in speech_samples] def test_inference_ctc_normal_batched_rel_pos(self): model = Wav2Vec2ConformerForCTC.from_pretrained("facebook/wav2vec2-conformer-rel-pos-large-960h-ft") model.to(torch_device) processor = Wav2Vec2Processor.from_pretrained( "facebook/wav2vec2-conformer-rel-pos-large-960h-ft", do_lower_case=True ) input_speech = self._load_datasamples(2) inputs = processor(input_speech, return_tensors="pt", padding=True) input_values = inputs.input_values.to(torch_device) with torch.no_grad(): logits = model(input_values).logits predicted_ids = torch.argmax(logits, dim=-1) predicted_trans = processor.batch_decode(predicted_ids) EXPECTED_TRANSCRIPTIONS = [ "a man said to the universe sir i exist", "sweat covered brion's body trickling into the tight loincloth that was the only garment he wore", ] self.assertListEqual(predicted_trans, EXPECTED_TRANSCRIPTIONS) def test_inference_ctc_normal_batched_rope(self): model = Wav2Vec2ConformerForCTC.from_pretrained("facebook/wav2vec2-conformer-rope-large-960h-ft") model.to(torch_device) processor = Wav2Vec2Processor.from_pretrained( "facebook/wav2vec2-conformer-rope-large-960h-ft", do_lower_case=True ) input_speech = self._load_datasamples(2) inputs = processor(input_speech, return_tensors="pt", padding=True) input_values = inputs.input_values.to(torch_device) with torch.no_grad(): logits = model(input_values).logits predicted_ids = torch.argmax(logits, dim=-1) predicted_trans = processor.batch_decode(predicted_ids) EXPECTED_TRANSCRIPTIONS = [ "a man said to the universe sir i exist", "sweat covered brion's body trickling into the tight loin cloth that was the only garment he wore", ] self.assertListEqual(predicted_trans, EXPECTED_TRANSCRIPTIONS) def test_inference_pretrained(self): model = Wav2Vec2ConformerForPreTraining.from_pretrained("facebook/wav2vec2-conformer-rel-pos-large") 
model.to(torch_device) feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained( "facebook/wav2vec2-conformer-rel-pos-large", return_attention_mask=True ) input_speech = self._load_datasamples(2) inputs_dict = feature_extractor(input_speech, return_tensors="pt", padding=True) batch_size = inputs_dict["input_values"].shape[0] feature_seq_length = int(model._get_feat_extract_output_lengths(inputs_dict["input_values"].shape[1])) features_shape = (batch_size, feature_seq_length) torch.manual_seed(0) mask_time_indices = _compute_mask_indices( features_shape, model.config.mask_time_prob, model.config.mask_time_length, min_masks=2, ) mask_time_indices = torch.from_numpy(mask_time_indices).to(torch_device) with torch.no_grad(): outputs = model( inputs_dict.input_values.to(torch_device), attention_mask=inputs_dict.attention_mask.to(torch_device), mask_time_indices=mask_time_indices, ) # compute cosine similarity cosine_sim = torch.cosine_similarity(outputs.projected_states, outputs.projected_quantized_states, dim=-1) # retrieve cosine sim of masked features cosine_sim_masked = cosine_sim[mask_time_indices] # ... now compare to randomly initialized model config = Wav2Vec2ConformerConfig.from_pretrained("facebook/wav2vec2-conformer-rel-pos-large") model_rand = Wav2Vec2ConformerForPreTraining(config).to(torch_device).eval() with torch.no_grad(): outputs_rand = model_rand( inputs_dict.input_values.to(torch_device), attention_mask=inputs_dict.attention_mask.to(torch_device), mask_time_indices=mask_time_indices, ) # compute cosine similarity cosine_sim_rand = torch.cosine_similarity( outputs_rand.projected_states, outputs_rand.projected_quantized_states, dim=-1 ) # retrieve cosine sim of masked features cosine_sim_masked_rand = cosine_sim_rand[mask_time_indices] # a pretrained wav2vec2_conformer model has learned to predict the quantized latent states # => the cosine similarity between quantized states and predicted states > 0.5 # a random wav2vec2_conformer model has not learned to predict the quantized latent states # => the cosine similarity between quantized states and predicted states is very likely < 0.1 self.assertTrue(cosine_sim_masked.mean().item() - 5 * cosine_sim_masked_rand.mean().item() > 0)
transformers/tests/models/wav2vec2_conformer/test_modeling_wav2vec2_conformer.py/0
{ "file_path": "transformers/tests/models/wav2vec2_conformer/test_modeling_wav2vec2_conformer.py", "repo_id": "transformers", "token_count": 18096 }
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from transformers.file_utils import is_vision_available from transformers.testing_utils import require_torch, require_vision from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs if is_vision_available(): from transformers import ZoeDepthImageProcessor class ZoeDepthImageProcessingTester: def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, ensure_multiple_of=32, keep_aspect_ratio=False, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_pad=False, ): size = size if size is not None else {"height": 18, "width": 18} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.ensure_multiple_of = ensure_multiple_of self.keep_aspect_ratio = keep_aspect_ratio self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self.do_pad = do_pad def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "size": self.size, "ensure_multiple_of": self.ensure_multiple_of, "keep_aspect_ratio": self.keep_aspect_ratio, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_pad": self.do_pad, } def expected_output_image_shape(self, images): return self.num_channels, self.ensure_multiple_of, self.ensure_multiple_of def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision class ZoeDepthImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = ZoeDepthImageProcessor if is_vision_available() else None def setUp(self): super().setUp() self.image_processor_tester = ZoeDepthImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) self.assertTrue(hasattr(image_processing, "ensure_multiple_of")) self.assertTrue(hasattr(image_processing, "do_rescale")) self.assertTrue(hasattr(image_processing, "rescale_factor")) self.assertTrue(hasattr(image_processing, "do_pad")) def 
test_image_processor_from_dict_with_kwargs(self): image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"height": 18, "width": 18}) image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42) self.assertEqual(image_processor.size, {"height": 42, "width": 42}) def test_ensure_multiple_of(self): # Test variable by turning off all other variables which affect the size, size which is not multiple of 32 image = np.zeros((489, 640, 3)) size = {"height": 380, "width": 513} multiple = 32 image_processor = ZoeDepthImageProcessor( do_pad=False, ensure_multiple_of=multiple, size=size, keep_aspect_ratio=False ) pixel_values = image_processor(image, return_tensors="pt").pixel_values self.assertEqual(list(pixel_values.shape), [1, 3, 384, 512]) self.assertTrue(pixel_values.shape[2] % multiple == 0) self.assertTrue(pixel_values.shape[3] % multiple == 0) # Test variable by turning off all other variables which affect the size, size which is already multiple of 32 image = np.zeros((511, 511, 3)) height, width = 512, 512 size = {"height": height, "width": width} multiple = 32 image_processor = ZoeDepthImageProcessor( do_pad=False, ensure_multiple_of=multiple, size=size, keep_aspect_ratio=False ) pixel_values = image_processor(image, return_tensors="pt").pixel_values self.assertEqual(list(pixel_values.shape), [1, 3, height, width]) self.assertTrue(pixel_values.shape[2] % multiple == 0) self.assertTrue(pixel_values.shape[3] % multiple == 0) def test_keep_aspect_ratio(self): # Test `keep_aspect_ratio=True` by turning off all other variables which affect the size height, width = 489, 640 image = np.zeros((height, width, 3)) size = {"height": 512, "width": 512} image_processor = ZoeDepthImageProcessor(do_pad=False, keep_aspect_ratio=True, size=size, ensure_multiple_of=1) pixel_values = image_processor(image, return_tensors="pt").pixel_values # As can be seen, the image is resized to the maximum size that fits in the specified size self.assertEqual(list(pixel_values.shape), [1, 3, 512, 670]) # Test `keep_aspect_ratio=False` by turning off all other variables which affect the size image_processor = ZoeDepthImageProcessor( do_pad=False, keep_aspect_ratio=False, size=size, ensure_multiple_of=1 ) pixel_values = image_processor(image, return_tensors="pt").pixel_values # As can be seen, the size is respected self.assertEqual(list(pixel_values.shape), [1, 3, size["height"], size["width"]]) # Test `keep_aspect_ratio=True` with `ensure_multiple_of` set image = np.zeros((489, 640, 3)) size = {"height": 511, "width": 511} multiple = 32 image_processor = ZoeDepthImageProcessor(size=size, keep_aspect_ratio=True, ensure_multiple_of=multiple) pixel_values = image_processor(image, return_tensors="pt").pixel_values self.assertEqual(list(pixel_values.shape), [1, 3, 512, 672]) self.assertTrue(pixel_values.shape[2] % multiple == 0) self.assertTrue(pixel_values.shape[3] % multiple == 0)
transformers/tests/models/zoedepth/test_image_processing_zoedepth.py/0
{ "file_path": "transformers/tests/models/zoedepth/test_image_processing_zoedepth.py", "repo_id": "transformers", "token_count": 3055 }
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest from typing import Dict import datasets import numpy as np import requests from datasets import load_dataset from huggingface_hub import ImageSegmentationOutputElement from huggingface_hub.utils import insecure_hashlib from transformers import ( MODEL_FOR_IMAGE_SEGMENTATION_MAPPING, MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING, MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING, AutoImageProcessor, AutoModelForImageSegmentation, AutoModelForInstanceSegmentation, DetrForSegmentation, ImageSegmentationPipeline, MaskFormerForInstanceSegmentation, is_vision_available, pipeline, ) from transformers.testing_utils import ( compare_pipeline_output_to_hub_spec, is_pipeline_test, nested_simplify, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class Image: @staticmethod def open(*args, **kwargs): pass def hashimage(image: Image) -> str: m = insecure_hashlib.md5(image.tobytes()) return m.hexdigest()[:10] def mask_to_test_readable(mask: Image) -> Dict: npimg = np.array(mask) white_pixels = (npimg == 255).sum() shape = npimg.shape return {"hash": hashimage(mask), "white_pixels": white_pixels, "shape": shape} def mask_to_test_readable_only_shape(mask: Image) -> Dict: npimg = np.array(mask) shape = npimg.shape return {"shape": shape} @is_pipeline_test @require_vision @require_timm @require_torch class ImageSegmentationPipelineTests(unittest.TestCase): model_mapping = dict( (list(MODEL_FOR_IMAGE_SEGMENTATION_MAPPING.items()) if MODEL_FOR_IMAGE_SEGMENTATION_MAPPING else []) + (MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING.items() if MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING else []) + (MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING.items() if MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING else []) ) def get_test_pipeline( self, model, tokenizer=None, image_processor=None, feature_extractor=None, processor=None, torch_dtype="float32", ): image_segmenter = ImageSegmentationPipeline( model=model, tokenizer=tokenizer, feature_extractor=feature_extractor, image_processor=image_processor, processor=processor, torch_dtype=torch_dtype, ) return image_segmenter, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def run_pipeline_test(self, image_segmenter, examples): outputs = image_segmenter( "./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0, mask_threshold=0, overlap_mask_area_threshold=0, ) self.assertIsInstance(outputs, list) n = len(outputs) if isinstance(image_segmenter.model, (MaskFormerForInstanceSegmentation, DetrForSegmentation)): # Instance segmentation (maskformer, and detr) have a slot for null class # and can output nothing even with a low threshold self.assertGreaterEqual(n, 0) else: self.assertGreaterEqual(n, 1) # XXX: PIL.Image implements __eq__ which bypasses ANY, so we inverse the comparison # to make it work 
self.assertEqual([{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * n, outputs) # we use revision="refs/pr/1" until the PR is merged # https://hf.co/datasets/hf-internal-testing/fixtures_image_utils/discussions/1 dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", split="test", revision="refs/pr/1") # RGBA outputs = image_segmenter(dataset[0]["image"], threshold=0.0, mask_threshold=0, overlap_mask_area_threshold=0) m = len(outputs) self.assertEqual([{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * m, outputs) # LA outputs = image_segmenter(dataset[1]["image"], threshold=0.0, mask_threshold=0, overlap_mask_area_threshold=0) m = len(outputs) self.assertEqual([{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * m, outputs) # L outputs = image_segmenter(dataset[2]["image"], threshold=0.0, mask_threshold=0, overlap_mask_area_threshold=0) m = len(outputs) self.assertEqual([{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * m, outputs) if isinstance(image_segmenter.model, DetrForSegmentation): # We need to test batch_size with images with the same size. # Detr doesn't normalize the size of the images, meaning we can have # 800x800 or 800x1200, meaning we cannot batch simply. # We simply bail on this batch_size = 1 else: batch_size = 2 # 5 times the same image so the output shape is predictable batch = [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] outputs = image_segmenter( batch, threshold=0.0, mask_threshold=0, overlap_mask_area_threshold=0, batch_size=batch_size, ) self.assertEqual(len(batch), len(outputs)) self.assertEqual(len(outputs[0]), n) self.assertEqual( [ [{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * n, [{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * n, [{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * n, [{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * n, [{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * n, ], outputs, f"Expected [{n}, {n}, {n}, {n}, {n}], got {[len(item) for item in outputs]}", ) for single_output in outputs: for output_element in single_output: compare_pipeline_output_to_hub_spec(output_element, ImageSegmentationOutputElement) @require_tf @unittest.skip(reason="Image segmentation not implemented in TF") def test_small_model_tf(self): pass @require_torch def test_small_model_pt_no_panoptic(self): model_id = "hf-internal-testing/tiny-random-mobilevit" # The default task is `image-classification` we need to override pipe = pipeline(task="image-segmentation", model=model_id) # This model does NOT support neither `instance` nor `panoptic` # We should error out with self.assertRaises(ValueError) as e: pipe("http://images.cocodataset.org/val2017/000000039769.jpg", subtask="panoptic") self.assertEqual( str(e.exception), "Subtask panoptic is not supported for model <class" " 'transformers.models.mobilevit.modeling_mobilevit.MobileViTForSemanticSegmentation'>", ) with self.assertRaises(ValueError) as e: pipe("http://images.cocodataset.org/val2017/000000039769.jpg", subtask="instance") self.assertEqual( str(e.exception), 
"Subtask instance is not supported for model <class" " 'transformers.models.mobilevit.modeling_mobilevit.MobileViTForSemanticSegmentation'>", ) @require_torch def test_small_model_pt(self): model_id = "hf-internal-testing/tiny-detr-mobilenetsv3-panoptic" model = AutoModelForImageSegmentation.from_pretrained(model_id) image_processor = AutoImageProcessor.from_pretrained(model_id) image_segmenter = ImageSegmentationPipeline( model=model, image_processor=image_processor, subtask="panoptic", threshold=0.0, mask_threshold=0.0, overlap_mask_area_threshold=0.0, ) outputs = image_segmenter( "http://images.cocodataset.org/val2017/000000039769.jpg", ) # Shortening by hashing for o in outputs: o["mask"] = mask_to_test_readable(o["mask"]) # This is extremely brittle, and those values are made specific for the CI. self.assertEqual( nested_simplify(outputs, decimals=4), [ { "score": 0.004, "label": "LABEL_215", "mask": {"hash": "a01498ca7c", "shape": (480, 640), "white_pixels": 307200}, }, ], ) outputs = image_segmenter( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ], ) for output in outputs: for o in output: o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(outputs, decimals=4), [ [ { "score": 0.004, "label": "LABEL_215", "mask": {"hash": "a01498ca7c", "shape": (480, 640), "white_pixels": 307200}, }, ], [ { "score": 0.004, "label": "LABEL_215", "mask": {"hash": "a01498ca7c", "shape": (480, 640), "white_pixels": 307200}, }, ], ], ) output = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", subtask="instance") for o in output: o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(output, decimals=4), [ { "score": 0.004, "label": "LABEL_215", "mask": {"hash": "a01498ca7c", "shape": (480, 640), "white_pixels": 307200}, }, ], ) # This must be surprising to the reader. # The `panoptic` returns only LABEL_215, and this returns 3 labels. # output = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", subtask="semantic") output_masks = [o["mask"] for o in output] # page links (to visualize) expected_masks = [ "https://huggingface.co/datasets/hf-internal-testing/mask-for-image-segmentation-tests/blob/main/mask_0.png", "https://huggingface.co/datasets/hf-internal-testing/mask-for-image-segmentation-tests/blob/main/mask_1.png", "https://huggingface.co/datasets/hf-internal-testing/mask-for-image-segmentation-tests/blob/main/mask_2.png", ] # actual links to get files expected_masks = [x.replace("/blob/", "/resolve/") for x in expected_masks] expected_masks = [Image.open(requests.get(image, stream=True).raw) for image in expected_masks] # Convert masks to numpy array output_masks = [np.array(x) for x in output_masks] expected_masks = [np.array(x) for x in expected_masks] self.assertEqual(output_masks[0].shape, expected_masks[0].shape) self.assertEqual(output_masks[1].shape, expected_masks[1].shape) self.assertEqual(output_masks[2].shape, expected_masks[2].shape) # With un-trained tiny random models, the output `logits` tensor is very likely to contain many values # close to each other, which cause `argmax` to give quite different results when running the test on 2 # environments. We use a lower threshold `0.9` here to avoid flakiness. 
self.assertGreaterEqual(np.mean(output_masks[0] == expected_masks[0]), 0.9) self.assertGreaterEqual(np.mean(output_masks[1] == expected_masks[1]), 0.9) self.assertGreaterEqual(np.mean(output_masks[2] == expected_masks[2]), 0.9) for o in output: o["mask"] = mask_to_test_readable_only_shape(o["mask"]) self.maxDiff = None self.assertEqual( nested_simplify(output, decimals=4), [ { "label": "LABEL_88", "mask": {"shape": (480, 640)}, "score": None, }, { "label": "LABEL_101", "mask": {"shape": (480, 640)}, "score": None, }, { "label": "LABEL_215", "mask": {"shape": (480, 640)}, "score": None, }, ], ) @require_torch def test_small_model_pt_semantic(self): model_id = "hf-internal-testing/tiny-random-beit-pipeline" image_segmenter = pipeline(model=model_id) outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg") for o in outputs: # shortening by hashing o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(outputs, decimals=4), [ { "score": None, "label": "LABEL_0", "mask": {"hash": "42d0907228", "shape": (480, 640), "white_pixels": 10714}, }, { "score": None, "label": "LABEL_1", "mask": {"hash": "46b8cc3976", "shape": (480, 640), "white_pixels": 296486}, }, ], ) @require_torch @slow def test_integration_torch_image_segmentation(self): model_id = "facebook/detr-resnet-50-panoptic" image_segmenter = pipeline( "image-segmentation", model=model_id, threshold=0.0, overlap_mask_area_threshold=0.0, ) outputs = image_segmenter( "http://images.cocodataset.org/val2017/000000039769.jpg", ) # Shortening by hashing for o in outputs: o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(outputs, decimals=4), [ { "score": 0.9094, "label": "blanket", "mask": {"hash": "dcff19a97a", "shape": (480, 640), "white_pixels": 16617}, }, { "score": 0.9941, "label": "cat", "mask": {"hash": "9c0af87bd0", "shape": (480, 640), "white_pixels": 59185}, }, { "score": 0.9987, "label": "remote", "mask": {"hash": "c7870600d6", "shape": (480, 640), "white_pixels": 4182}, }, { "score": 0.9995, "label": "remote", "mask": {"hash": "ef899a25fd", "shape": (480, 640), "white_pixels": 2275}, }, { "score": 0.9722, "label": "couch", "mask": {"hash": "37b8446ac5", "shape": (480, 640), "white_pixels": 172380}, }, { "score": 0.9994, "label": "cat", "mask": {"hash": "6a09d3655e", "shape": (480, 640), "white_pixels": 52561}, }, ], ) outputs = image_segmenter( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ], ) # Shortening by hashing for output in outputs: for o in output: o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(outputs, decimals=4), [ [ { "score": 0.9094, "label": "blanket", "mask": {"hash": "dcff19a97a", "shape": (480, 640), "white_pixels": 16617}, }, { "score": 0.9941, "label": "cat", "mask": {"hash": "9c0af87bd0", "shape": (480, 640), "white_pixels": 59185}, }, { "score": 0.9987, "label": "remote", "mask": {"hash": "c7870600d6", "shape": (480, 640), "white_pixels": 4182}, }, { "score": 0.9995, "label": "remote", "mask": {"hash": "ef899a25fd", "shape": (480, 640), "white_pixels": 2275}, }, { "score": 0.9722, "label": "couch", "mask": {"hash": "37b8446ac5", "shape": (480, 640), "white_pixels": 172380}, }, { "score": 0.9994, "label": "cat", "mask": {"hash": "6a09d3655e", "shape": (480, 640), "white_pixels": 52561}, }, ], [ { "score": 0.9094, "label": "blanket", "mask": {"hash": "dcff19a97a", "shape": (480, 640), "white_pixels": 16617}, }, { "score": 0.9941, 
"label": "cat", "mask": {"hash": "9c0af87bd0", "shape": (480, 640), "white_pixels": 59185}, }, { "score": 0.9987, "label": "remote", "mask": {"hash": "c7870600d6", "shape": (480, 640), "white_pixels": 4182}, }, { "score": 0.9995, "label": "remote", "mask": {"hash": "ef899a25fd", "shape": (480, 640), "white_pixels": 2275}, }, { "score": 0.9722, "label": "couch", "mask": {"hash": "37b8446ac5", "shape": (480, 640), "white_pixels": 172380}, }, { "score": 0.9994, "label": "cat", "mask": {"hash": "6a09d3655e", "shape": (480, 640), "white_pixels": 52561}, }, ], ], ) @require_torch @slow def test_threshold(self): model_id = "facebook/detr-resnet-50-panoptic" image_segmenter = pipeline("image-segmentation", model=model_id) outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.999) # Shortening by hashing for o in outputs: o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(outputs, decimals=4), [ { "score": 0.9995, "label": "remote", "mask": {"hash": "d02404f578", "shape": (480, 640), "white_pixels": 2789}, }, { "score": 0.9994, "label": "cat", "mask": {"hash": "eaa115b40c", "shape": (480, 640), "white_pixels": 304411}, }, ], ) outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.5) for o in outputs: o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(outputs, decimals=4), [ { "score": 0.9941, "label": "cat", "mask": {"hash": "9c0af87bd0", "shape": (480, 640), "white_pixels": 59185}, }, { "score": 0.9987, "label": "remote", "mask": {"hash": "c7870600d6", "shape": (480, 640), "white_pixels": 4182}, }, { "score": 0.9995, "label": "remote", "mask": {"hash": "ef899a25fd", "shape": (480, 640), "white_pixels": 2275}, }, { "score": 0.9722, "label": "couch", "mask": {"hash": "37b8446ac5", "shape": (480, 640), "white_pixels": 172380}, }, { "score": 0.9994, "label": "cat", "mask": {"hash": "6a09d3655e", "shape": (480, 640), "white_pixels": 52561}, }, ], ) @require_torch @slow def test_maskformer(self): threshold = 0.8 model_id = "facebook/maskformer-swin-base-ade" model = AutoModelForInstanceSegmentation.from_pretrained(model_id) image_processor = AutoImageProcessor.from_pretrained(model_id) image_segmenter = pipeline("image-segmentation", model=model, image_processor=image_processor) image = load_dataset("hf-internal-testing/fixtures_ade20k", split="test", trust_remote_code=True) file = image[0]["file"] outputs = image_segmenter(file, threshold=threshold) # Shortening by hashing for o in outputs: o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(outputs, decimals=4), [ { "score": 0.9974, "label": "wall", "mask": {"hash": "a547b7c062", "shape": (512, 683), "white_pixels": 14252}, }, { "score": 0.949, "label": "house", "mask": {"hash": "0da9b7b38f", "shape": (512, 683), "white_pixels": 132177}, }, { "score": 0.9995, "label": "grass", "mask": {"hash": "1d07ea0a26", "shape": (512, 683), "white_pixels": 53444}, }, { "score": 0.9976, "label": "tree", "mask": {"hash": "6cdc97c7da", "shape": (512, 683), "white_pixels": 7944}, }, { "score": 0.8239, "label": "plant", "mask": {"hash": "1ab4ce378f", "shape": (512, 683), "white_pixels": 4136}, }, { "score": 0.9942, "label": "road, route", "mask": {"hash": "39c5d17be5", "shape": (512, 683), "white_pixels": 1941}, }, { "score": 1.0, "label": "sky", "mask": {"hash": "a3756324a6", "shape": (512, 683), "white_pixels": 135802}, }, ], ) @require_torch @slow def test_oneformer(self): image_segmenter = 
pipeline(model="shi-labs/oneformer_ade20k_swin_tiny") image = load_dataset("hf-internal-testing/fixtures_ade20k", split="test", trust_remote_code=True) file = image[0]["file"] outputs = image_segmenter(file, threshold=0.99) # Shortening by hashing for o in outputs: o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(outputs, decimals=4), [ { "score": 0.9981, "label": "grass", "mask": {"hash": "3a92904d4c", "white_pixels": 118131, "shape": (512, 683)}, }, { "score": 0.9992, "label": "sky", "mask": {"hash": "fa2300cc9a", "white_pixels": 231565, "shape": (512, 683)}, }, ], ) # Different task outputs = image_segmenter(file, threshold=0.99, subtask="instance") # Shortening by hashing for o in outputs: o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(outputs, decimals=4), [ { "score": 0.9991, "label": "sky", "mask": {"hash": "8b1ffad016", "white_pixels": 230566, "shape": (512, 683)}, }, { "score": 0.9981, "label": "grass", "mask": {"hash": "9bbdf83d3d", "white_pixels": 119130, "shape": (512, 683)}, }, ], ) # Different task outputs = image_segmenter(file, subtask="semantic") # Shortening by hashing for o in outputs: o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(outputs, decimals=4), [ { "score": None, "label": "wall", "mask": {"hash": "897fb20b7f", "white_pixels": 14506, "shape": (512, 683)}, }, { "score": None, "label": "building", "mask": {"hash": "f2a68c63e4", "white_pixels": 125019, "shape": (512, 683)}, }, { "score": None, "label": "sky", "mask": {"hash": "e0ca3a548e", "white_pixels": 135330, "shape": (512, 683)}, }, { "score": None, "label": "tree", "mask": {"hash": "7c9544bcac", "white_pixels": 16263, "shape": (512, 683)}, }, { "score": None, "label": "road, route", "mask": {"hash": "2c7704e491", "white_pixels": 2143, "shape": (512, 683)}, }, { "score": None, "label": "grass", "mask": {"hash": "bf6c2867e0", "white_pixels": 53040, "shape": (512, 683)}, }, { "score": None, "label": "plant", "mask": {"hash": "93c4b7199e", "white_pixels": 3335, "shape": (512, 683)}, }, { "score": None, "label": "house", "mask": {"hash": "93ec419ad5", "white_pixels": 60, "shape": (512, 683)}, }, ], ) def test_save_load(self): model_id = "hf-internal-testing/tiny-detr-mobilenetsv3-panoptic" model = AutoModelForImageSegmentation.from_pretrained(model_id) image_processor = AutoImageProcessor.from_pretrained(model_id) image_segmenter = pipeline( task="image-segmentation", model=model, image_processor=image_processor, ) with tempfile.TemporaryDirectory() as tmpdirname: image_segmenter.save_pretrained(tmpdirname) pipeline(task="image-segmentation", model=tmpdirname)
transformers/tests/pipelines/test_pipelines_image_segmentation.py/0
{ "file_path": "transformers/tests/pipelines/test_pipelines_image_segmentation.py", "repo_id": "transformers", "token_count": 15853 }
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from datasets import load_dataset from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, is_torch_available, nested_simplify, require_tf, require_torch, require_torch_accelerator, require_vision, slow, torch_device, ) from .test_pipelines_common import ANY if is_torch_available(): import torch from transformers.pipelines.pt_utils import KeyDataset if is_vision_available(): from PIL import Image else: class Image: @staticmethod def open(*args, **kwargs): pass @is_pipeline_test @require_torch @require_vision class VisualQuestionAnsweringPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING def get_test_pipeline( self, model, tokenizer=None, image_processor=None, feature_extractor=None, processor=None, torch_dtype="float32", ): vqa_pipeline = pipeline( "visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa", torch_dtype=torch_dtype, ) examples = [ { "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"), "question": "How many cats are there?", }, { "image": "./tests/fixtures/tests_samples/COCO/000000039769.png", "question": "How many cats are there?", }, ] return vqa_pipeline, examples def run_pipeline_test(self, vqa_pipeline, examples): outputs = vqa_pipeline(examples, top_k=1) self.assertEqual( outputs, [ [{"score": ANY(float), "answer": ANY(str)}], [{"score": ANY(float), "answer": ANY(str)}], ], ) @require_torch def test_small_model_pt(self): vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa") image = "./tests/fixtures/tests_samples/COCO/000000039769.png" question = "How many cats are there?" outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2) self.assertEqual( outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}] ) outputs = vqa_pipeline({"image": image, "question": question}, top_k=2) self.assertEqual( outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}] ) @require_torch @require_torch_accelerator def test_small_model_pt_blip2(self): vqa_pipeline = pipeline( "visual-question-answering", model="hf-internal-testing/tiny-random-Blip2ForConditionalGeneration" ) image = "./tests/fixtures/tests_samples/COCO/000000039769.png" question = "How many cats are there?" 
outputs = vqa_pipeline(image=image, question=question) self.assertEqual(outputs, [{"answer": ANY(str)}]) outputs = vqa_pipeline({"image": image, "question": question}) self.assertEqual(outputs, [{"answer": ANY(str)}]) outputs = vqa_pipeline([{"image": image, "question": question}, {"image": image, "question": question}]) self.assertEqual(outputs, [[{"answer": ANY(str)}]] * 2) vqa_pipeline = pipeline( "visual-question-answering", model="hf-internal-testing/tiny-random-Blip2ForConditionalGeneration", model_kwargs={"torch_dtype": torch.float16}, device=torch_device, ) self.assertEqual(vqa_pipeline.model.device, torch.device("{}:0".format(torch_device))) self.assertEqual(vqa_pipeline.model.language_model.dtype, torch.float16) self.assertEqual(vqa_pipeline.model.vision_model.dtype, torch.float16) outputs = vqa_pipeline(image=image, question=question) self.assertEqual(outputs, [{"answer": ANY(str)}]) @slow @require_torch def test_large_model_pt(self): vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa") image = "./tests/fixtures/tests_samples/COCO/000000039769.png" question = "How many cats are there?" outputs = vqa_pipeline(image=image, question=question, top_k=2) self.assertEqual( nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}] ) outputs = vqa_pipeline({"image": image, "question": question}, top_k=2) self.assertEqual( nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}] ) outputs = vqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 ) self.assertEqual( nested_simplify(outputs, decimals=4), [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2, ) @slow @require_torch @require_torch_accelerator def test_large_model_pt_blip2(self): vqa_pipeline = pipeline( "visual-question-answering", model="Salesforce/blip2-opt-2.7b", model_kwargs={"torch_dtype": torch.float16}, device=torch_device, ) self.assertEqual(vqa_pipeline.model.device, torch.device("{}:0".format(torch_device))) self.assertEqual(vqa_pipeline.model.language_model.dtype, torch.float16) image = "./tests/fixtures/tests_samples/COCO/000000039769.png" question = "Question: how many cats are there? 
Answer:" outputs = vqa_pipeline(image=image, question=question) self.assertEqual(outputs, [{"answer": "two"}]) outputs = vqa_pipeline({"image": image, "question": question}) self.assertEqual(outputs, [{"answer": "two"}]) outputs = vqa_pipeline([{"image": image, "question": question}, {"image": image, "question": question}]) self.assertEqual(outputs, [[{"answer": "two"}]] * 2) @require_torch def test_small_model_pt_image_list(self): vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa") images = [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000004016.png", ] outputs = vqa_pipeline(image=images, question="How many cats are there?", top_k=1) self.assertEqual( outputs, [[{"score": ANY(float), "answer": ANY(str)}], [{"score": ANY(float), "answer": ANY(str)}]] ) @require_torch def test_small_model_pt_question_list(self): vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa") image = "./tests/fixtures/tests_samples/COCO/000000039769.png" questions = ["How many cats are there?", "Are there any dogs?"] outputs = vqa_pipeline(image=image, question=questions, top_k=1) self.assertEqual( outputs, [[{"score": ANY(float), "answer": ANY(str)}], [{"score": ANY(float), "answer": ANY(str)}]] ) @require_torch def test_small_model_pt_both_list(self): vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa") images = [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000004016.png", ] questions = ["How many cats are there?", "Are there any dogs?"] outputs = vqa_pipeline(image=images, question=questions, top_k=1) self.assertEqual( outputs, [ [{"score": ANY(float), "answer": ANY(str)}], [{"score": ANY(float), "answer": ANY(str)}], [{"score": ANY(float), "answer": ANY(str)}], [{"score": ANY(float), "answer": ANY(str)}], ], ) @require_torch def test_small_model_pt_dataset(self): vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa") dataset = load_dataset("hf-internal-testing/dummy_image_text_data", split="train[:2]") question = "What's in the image?" outputs = vqa_pipeline(image=KeyDataset(dataset, "image"), question=question, top_k=1) self.assertEqual( outputs, [ [{"score": ANY(float), "answer": ANY(str)}], [{"score": ANY(float), "answer": ANY(str)}], ], ) @require_tf @unittest.skip(reason="Visual question answering not implemented in TF") def test_small_model_tf(self): pass
transformers/tests/pipelines/test_pipelines_visual_question_answering.py/0
{ "file_path": "transformers/tests/pipelines/test_pipelines_visual_question_answering.py", "repo_id": "transformers", "token_count": 4269 }
import gc import unittest from transformers import AutoModelForCausalLM, AutoTokenizer, CompressedTensorsConfig from transformers.testing_utils import require_compressed_tensors, require_torch from transformers.utils import is_torch_available if is_torch_available(): import torch @require_compressed_tensors @require_torch class CompressedTensorsTest(unittest.TestCase): tinyllama_w8a16 = "nm-testing/tinyllama-w8a16-dense-hf-quantizer" tinyllama_w4a16 = "nm-testing/tinyllama-w4a16-compressed-hf-quantizer" tinyllama_w8a8 = "nm-testing/tinyllama-w8a8-compressed-hf-quantizer" llama3_8b_fp8 = "nm-testing/Meta-Llama-3-8B-Instruct-fp8-hf_compat" prompt = "Paris is the capital of which country?" def tearDown(self): gc.collect() torch.cuda.empty_cache() gc.collect() def test_config_args(self): with self.assertRaises(ValueError): # passing quant scheme directly is not allowed CompressedTensorsConfig(config_groups={"weights": {"num_bits": 8}}) CompressedTensorsConfig( config_groups={"FP8": ["Linear"]}, ignore=["lm_head"], quantization_status="frozen", sparsity_config={"format": "dense"}, ) def test_config_to_from_dict(self): config = CompressedTensorsConfig(config_groups={"FP8": ["Linear"]}, sparsity_config={"format": "dense"}) config_dict = config.to_dict() config_from_dict = CompressedTensorsConfig.from_dict(config_dict) from compressed_tensors import QuantizationConfig, SparsityCompressionConfig self.assertIsInstance(config_from_dict.quantization_config, QuantizationConfig) self.assertIsInstance(config_from_dict.sparsity_config, SparsityCompressionConfig) def test_tinyllama_w8a8(self): expected_out = "<s> Paris is the capital of which country?\n\n**A) Paris**\n\n**Q** ** Paris is the capital of which country?\n\n**A) Paris**\n\n**Q** ** Paris is the capital of which country" self._test_quantized_model(self.tinyllama_w8a8, expected_out) def test_tinyllama_w4a16(self): expected_out = "<s> Paris is the capital of which country?\nAnswer: Paris is the capital of France.\nQuestion: Which country is the capital of which city?\nAnswer: The capital of the city of New York is New York.\nQuestion: Which" self._test_quantized_model(self.tinyllama_w4a16, expected_out) def test_tinyllama_w8a16(self): expected_out = "<s> Paris is the capital of which country?\nA. France\nB. Germany\nC. Spain\nD. Italy\nE. Switzerland\nQ10. Which of the following is not a country in the European Union?\nA." self._test_quantized_model(self.tinyllama_w8a16, expected_out) def test_llama_8b_fp8(self): expected_out = "<|begin_of_text|>Paris is the capital of which country? France\nWhat is the name of the famous art museum in Paris? The Louvre\nWhat is the name of the famous opera house in Paris? 
Palais Garnier\nWhat is the name of the" self._test_quantized_model(self.llama3_8b_fp8, expected_out) def _test_quantized_model(self, model_name: str, expected_output: str): """Carry out generation""" quantized_model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto") tokenizer = AutoTokenizer.from_pretrained(model_name) device = quantized_model.device self.assertIsNotNone( quantized_model.config.quantization_config, "quantization_config should not be None", ) self.assertTrue( any( key for key, tensor in quantized_model.state_dict().items() if "scale" in key and not torch.all(tensor == 1.0) ), "quantized model should load a non-trivial scale into the state dict", ) inputs = tokenizer(self.prompt, return_tensors="pt").to(device) generated_ids = quantized_model.generate(**inputs, max_length=50, do_sample=False) outputs = tokenizer.batch_decode(generated_ids) self.assertIsNotNone(outputs) self.assertEqual(outputs[0], expected_output)
transformers/tests/quantization/compressed_tensor/test_compressed_tensors.py/0
{ "file_path": "transformers/tests/quantization/compressed_tensor/test_compressed_tensors.py", "repo_id": "transformers", "token_count": 1688 }
# coding=utf-8 # Copyright 2021 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np from transformers import BatchFeature from transformers.testing_utils import require_tf, require_torch from .test_feature_extraction_common import FeatureExtractionSavingTestMixin class SequenceFeatureExtractionTestMixin(FeatureExtractionSavingTestMixin): # to overwrite at feature extractactor specific tests feat_extract_tester = None feature_extraction_class = None @property def feat_extract_dict(self): return self.feat_extract_tester.prepare_feat_extract_dict() def test_feat_extract_common_properties(self): feat_extract = self.feature_extraction_class(**self.feat_extract_dict) self.assertTrue(hasattr(feat_extract, "feature_size")) self.assertTrue(hasattr(feat_extract, "sampling_rate")) self.assertTrue(hasattr(feat_extract, "padding_value")) def test_batch_feature(self): speech_inputs = self.feat_extract_tester.prepare_inputs_for_common() feat_extract = self.feature_extraction_class(**self.feat_extract_dict) input_name = feat_extract.model_input_names[0] processed_features = BatchFeature({input_name: speech_inputs}) self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name]))) speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True) processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np") batch_features_input = processed_features[input_name] if len(batch_features_input.shape) < 3: batch_features_input = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size) ) @require_torch def test_batch_feature_pt(self): speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True) feat_extract = self.feature_extraction_class(**self.feat_extract_dict) input_name = feat_extract.model_input_names[0] processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt") batch_features_input = processed_features[input_name] if len(batch_features_input.shape) < 3: batch_features_input = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size) ) @require_tf def test_batch_feature_tf(self): speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True) feat_extract = self.feature_extraction_class(**self.feat_extract_dict) input_name = feat_extract.model_input_names[0] processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="tf") batch_features_input = processed_features[input_name] if len(batch_features_input.shape) < 3: batch_features_input = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size) ) def _check_padding(self, numpify=False): def _inputs_have_equal_length(input): 
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False
            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True

        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        pad_diff = self.feat_extract_tester.seq_length_diff
        pad_max_length = self.feat_extract_tester.max_seq_length + pad_diff
        pad_min_length = self.feat_extract_tester.min_seq_length
        batch_size = self.feat_extract_tester.batch_size
        feature_size = self.feat_extract_tester.feature_size

        # test padding for List[int] + numpy
        input_1 = feat_extract.pad(processed_features, padding=False)
        input_1 = input_1[input_name]

        input_2 = feat_extract.pad(processed_features, padding="longest")
        input_2 = input_2[input_name]

        input_3 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[-1]))
        input_3 = input_3[input_name]

        input_4 = feat_extract.pad(processed_features, padding="longest", return_tensors="np")
        input_4 = input_4[input_name]

        # max_length parameter has to be provided when setting `padding="max_length"`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="max_length")[input_name]

        input_5 = feat_extract.pad(
            processed_features, padding="max_length", max_length=pad_max_length, return_tensors="np"
        )
        input_5 = input_5[input_name]

        self.assertFalse(_inputs_have_equal_length(input_1))
        self.assertTrue(_inputs_have_equal_length(input_2))
        self.assertTrue(_inputs_have_equal_length(input_3))
        self.assertTrue(_inputs_are_equal(input_2, input_3))
        self.assertTrue(len(input_1[0]) == pad_min_length)
        self.assertTrue(len(input_1[1]) == pad_min_length + pad_diff)
        self.assertTrue(input_4.shape[:2] == (batch_size, len(input_3[0])))
        self.assertTrue(input_5.shape[:2] == (batch_size, pad_max_length))

        if feature_size > 1:
            self.assertTrue(input_4.shape[2] == input_5.shape[2] == feature_size)

        # test padding for `pad_to_multiple_of` for List[int] + numpy
        input_6 = feat_extract.pad(processed_features, pad_to_multiple_of=10)
        input_6 = input_6[input_name]

        input_7 = feat_extract.pad(processed_features, padding="longest", pad_to_multiple_of=10)
        input_7 = input_7[input_name]

        input_8 = feat_extract.pad(
            processed_features, padding="max_length", pad_to_multiple_of=10, max_length=pad_max_length
        )
        input_8 = input_8[input_name]

        input_9 = feat_extract.pad(
            processed_features,
            padding="max_length",
            pad_to_multiple_of=10,
            max_length=pad_max_length,
            return_tensors="np",
        )
        input_9 = input_9[input_name]

        self.assertTrue(all(len(x) % 10 == 0 for x in input_6))
        self.assertTrue(_inputs_are_equal(input_6, input_7))

        expected_mult_pad_length = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
        self.assertTrue(all(len(x) == expected_mult_pad_length for x in input_8))
        self.assertEqual(input_9.shape[:2], (batch_size, expected_mult_pad_length))

        if feature_size > 1:
            self.assertTrue(input_9.shape[2] == feature_size)

        # Check padding value is correct
        padding_vector_sum = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum()
        self.assertTrue(
            abs(np.asarray(input_2[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length))
            < 1e-3
        )
        self.assertTrue(
            abs(
                np.asarray(input_2[1])[pad_min_length + pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - pad_diff)
            )
            < 1e-3
        )
        self.assertTrue(
            abs(
                np.asarray(input_2[2])[pad_min_length + 2 * pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff)
            )
            < 1e-3
        )
        self.assertTrue(
            abs(input_5[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1e-3
        )
        self.assertTrue(
            abs(input_9[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length))
            < 1e-3
        )

    def _check_truncation(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False
            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True

        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        # truncate to smallest
        input_1 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), truncation=True
        )
        input_1 = input_1[input_name]

        input_2 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[0]))
        input_2 = input_2[input_name]

        self.assertTrue(_inputs_have_equal_length(input_1))
        self.assertFalse(_inputs_have_equal_length(input_2))

        # truncate to smallest with np
        input_3 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            return_tensors="np",
            truncation=True,
        )
        input_3 = input_3[input_name]

        input_4 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), return_tensors="np"
        )
        input_4 = input_4[input_name]

        self.assertTrue(_inputs_have_equal_length(input_3))
        self.assertTrue(input_3.shape[1] == len(speech_inputs[0]))

        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_4))

        # truncate to middle
        input_5 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[1]),
            truncation=True,
            return_tensors="np",
        )
        input_5 = input_5[input_name]

        input_6 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), truncation=True
        )
        input_6 = input_6[input_name]

        input_7 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), return_tensors="np"
        )
        input_7 = input_7[input_name]

        self.assertTrue(input_5.shape[1] == len(speech_inputs[1]))
        self.assertTrue(_inputs_have_equal_length(input_5))
        self.assertTrue(_inputs_have_equal_length(input_6))
        self.assertTrue(_inputs_are_equal(input_5, input_6))

        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_7))
        self.assertTrue(len(input_7[-1]) == len(speech_inputs[-1]))

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, truncation=True)[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]

        # max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="max_length", truncation=True)[input_name]

        # test truncation for `pad_to_multiple_of` for List[int] + numpy
        pad_to_multiple_of = 12
        input_8 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            pad_to_multiple_of=pad_to_multiple_of,
            truncation=True,
        )
        input_8 = input_8[input_name]

        input_9 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            pad_to_multiple_of=pad_to_multiple_of,
        )
        input_9 = input_9[input_name]

        # retrieve expected_length as multiple of pad_to_multiple_of
        expected_length = len(speech_inputs[0])
        if expected_length % pad_to_multiple_of != 0:
            expected_length = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of

        self.assertTrue(len(input_8[0]) == expected_length)
        self.assertTrue(_inputs_have_equal_length(input_8))
        self.assertFalse(_inputs_have_equal_length(input_9))

    def test_padding_from_list(self):
        self._check_padding(numpify=False)

    def test_padding_from_array(self):
        self._check_padding(numpify=True)

    def test_truncation_from_list(self):
        self._check_truncation(numpify=False)

    def test_truncation_from_array(self):
        self._check_truncation(numpify=True)

    @require_torch
    def test_padding_accepts_tensors_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)

    @require_tf
    def test_padding_accepts_tensors_tf(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_tf = feat_extract.pad(processed_features, padding="longest", return_tensors="tf")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_tf.numpy().astype(np.float32).sum()) < 1e-2)

    def test_attention_mask(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)

    def test_attention_mask_with_truncation(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )
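# A minimal usage sketch, not part of the mixin above: roughly how the `pad` behaviour
# exercised by these tests looks from user code. `Wav2Vec2FeatureExtractor` is assumed here
# only as a concrete `SequenceFeatureExtractor` subclass, and the random arrays stand in for
# real audio; any feature extractor exposing `pad` and `model_input_names` behaves the same way.
import numpy as np

from transformers import BatchFeature, Wav2Vec2FeatureExtractor

feature_extractor = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
speech_inputs = [np.random.rand(length).astype(np.float32) for length in (800, 1000, 1200)]
batch = BatchFeature({feature_extractor.model_input_names[0]: speech_inputs})

# padding="longest" pads every sequence up to the longest one in the batch (here 1200 samples) ...
padded = feature_extractor.pad(batch, padding="longest", return_tensors="np")
print(padded[feature_extractor.model_input_names[0]].shape)  # (3, 1200)

# ... while padding="max_length" with truncation=True also clips longer sequences, so every
# row ends up with exactly `max_length` samples (here 800).
truncated = feature_extractor.pad(batch, padding="max_length", max_length=800, truncation=True, return_tensors="np")
print(truncated[feature_extractor.model_input_names[0]].shape)  # (3, 800)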
transformers/tests/test_sequence_feature_extraction_common.py/0
{ "file_path": "transformers/tests/test_sequence_feature_extraction_common.py", "repo_id": "transformers", "token_count": 7929 }
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os

import pytest

from transformers.dynamic_module_utils import get_imports


TOP_LEVEL_IMPORT = """
import os
"""

IMPORT_IN_FUNCTION = """
def foo():
    import os
    return False
"""

DEEPLY_NESTED_IMPORT = """
def foo():
    def bar():
        if True:
            import os
        return False

    return bar()
"""

TOP_LEVEL_TRY_IMPORT = """
import os

try:
    import bar
except ImportError:
    raise ValueError()
"""

TRY_IMPORT_IN_FUNCTION = """
import os

def foo():
    try:
        import bar
    except ImportError:
        raise ValueError()
"""

MULTIPLE_EXCEPTS_IMPORT = """
import os

try:
    import bar
except (ImportError, AttributeError):
    raise ValueError()
"""

EXCEPT_AS_IMPORT = """
import os

try:
    import bar
except ImportError as e:
    raise ValueError()
"""

GENERIC_EXCEPT_IMPORT = """
import os

try:
    import bar
except:
    raise ValueError()
"""

MULTILINE_TRY_IMPORT = """
import os

try:
    import bar
    import baz
except ImportError:
    raise ValueError()
"""

MULTILINE_BOTH_IMPORT = """
import os

try:
    import bar
    import baz
except ImportError:
    x = 1
    raise ValueError()
"""

CASES = [
    TOP_LEVEL_IMPORT,
    IMPORT_IN_FUNCTION,
    DEEPLY_NESTED_IMPORT,
    TOP_LEVEL_TRY_IMPORT,
    GENERIC_EXCEPT_IMPORT,
    MULTILINE_TRY_IMPORT,
    MULTILINE_BOTH_IMPORT,
    MULTIPLE_EXCEPTS_IMPORT,
    EXCEPT_AS_IMPORT,
    TRY_IMPORT_IN_FUNCTION,
]


@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
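
# A hedged usage sketch, separate from the parametrized cases above: `get_imports` returns the
# top-level module names a file depends on and, as the try/except cases verify, imports guarded
# by a try/except block are ignored. The file name and the `optional_dep` module are
# illustrative placeholders.
import tempfile

source = "import os\n\ntry:\n    import optional_dep\nexcept ImportError:\n    raise ValueError()\n"

with tempfile.TemporaryDirectory() as tmp_dir:
    file_path = os.path.join(tmp_dir, "custom_module.py")
    with open(file_path, "w") as f:
        f.write(source)
    print(get_imports(file_path))  # ['os']; `optional_dep` is skipped because it is try/except-guarded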
transformers/tests/utils/test_dynamic_module_utils.py/0
{ "file_path": "transformers/tests/utils/test_dynamic_module_utils.py", "repo_id": "transformers", "token_count": 918 }
# coding=utf-8 # Copyright 2019 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import glob import itertools import json import os import os.path import sys import tempfile import threading import unittest import unittest.mock as mock import uuid import warnings from pathlib import Path import requests from huggingface_hub import HfApi, HfFolder from pytest import mark from requests.exceptions import HTTPError from transformers import ( AutoConfig, AutoModel, AutoModelForImageClassification, AutoModelForSequenceClassification, DynamicCache, LlavaForConditionalGeneration, MistralForCausalLM, OwlViTForObjectDetection, PretrainedConfig, is_torch_available, logging, ) from transformers.testing_utils import ( TOKEN, CaptureLogger, LoggingLevel, TemporaryHubRepo, TestCasePlus, is_staging_test, require_accelerate, require_flax, require_safetensors, require_tf, require_torch, require_torch_accelerator, require_torch_multi_accelerator, require_usr_bin_time, slow, torch_device, ) from transformers.utils import ( SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ) from transformers.utils.import_utils import ( is_flash_attn_2_available, is_flax_available, is_tf_available, is_torch_sdpa_available, is_torchdynamo_available, ) sys.path.append(str(Path(__file__).parent.parent.parent / "utils")) from test_module.custom_configuration import CustomConfig, NoSuperInitConfig # noqa E402 if is_torch_available(): import torch from safetensors.torch import save_file as safe_save_file from test_module.custom_modeling import CustomModel, NoSuperInitModel from torch import nn from transformers import ( AutoModelForCausalLM, AutoTokenizer, BertConfig, BertModel, CLIPTextModel, GenerationMixin, PreTrainedModel, T5Config, T5ForConditionalGeneration, ) from transformers.modeling_attn_mask_utils import ( AttentionMaskConverter, _create_4d_causal_attention_mask, _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask, ) from transformers.modeling_utils import ( _find_disjoint, _find_identical, dtype_byte_size, ) from transformers.pytorch_utils import isin_mps_friendly # Fake pretrained models for tests class BaseModel(PreTrainedModel): base_model_prefix = "base" config_class = PretrainedConfig def __init__(self, config): super().__init__(config) self.linear = nn.Linear(5, 5) self.linear_2 = nn.Linear(5, 5) def forward(self, x): return self.linear_2(self.linear(x)) class BaseModelWithTiedWeights(PreTrainedModel): config_class = PretrainedConfig def __init__(self, config): super().__init__(config) self.linear = nn.Linear(5, 5) self.linear_2 = nn.Linear(5, 5) def forward(self, x): return self.linear_2(self.linear(x)) def tie_weights(self): self.linear_2.weight = self.linear.weight class ModelWithHead(PreTrainedModel): base_model_prefix = "base" config_class = PretrainedConfig def _init_weights(self, module): pass def __init__(self, config): super().__init__(config) self.base = BaseModel(config) # linear is a common name between Base and Head on purpose. 
self.linear = nn.Linear(5, 5) self.linear2 = nn.Linear(5, 5) def forward(self, x): return self.linear2(self.linear(self.base(x))) class ModelWithHeadAndTiedWeights(PreTrainedModel): base_model_prefix = "base" config_class = PretrainedConfig def _init_weights(self, module): pass def __init__(self, config): super().__init__(config) self.base = BaseModel(config) self.decoder = nn.Linear(5, 5) def forward(self, x): return self.decoder(self.base(x)) def tie_weights(self): self.decoder.weight = self.base.linear.weight class Prepare4dCausalAttentionMaskModel(nn.Module): def forward(self, inputs_embeds): batch_size, seq_length, _ = inputs_embeds.shape past_key_values_length = 4 attention_mask = _prepare_4d_causal_attention_mask( None, (batch_size, seq_length), inputs_embeds, past_key_values_length ) return attention_mask class Create4dCausalAttentionMaskModel(nn.Module): def forward(self, inputs_embeds): batch_size, seq_length, _ = inputs_embeds.shape past_key_values_length = 4 attention_mask = _create_4d_causal_attention_mask( (batch_size, seq_length), dtype=inputs_embeds.dtype, device=inputs_embeds.device, past_key_values_length=past_key_values_length, ) return attention_mask class Prepare4dAttentionMaskModel(nn.Module): def forward(self, mask, inputs_embeds): attention_mask = _prepare_4d_attention_mask(mask, dtype=inputs_embeds.dtype) return attention_mask class TestOffline(unittest.TestCase): def test_offline(self): # Ugly setup with monkeypatches, amending env vars here is too late as libs have already been imported from huggingface_hub import constants from transformers.utils import hub offlfine_env = hub._is_offline_mode hub_cache_env = constants.HF_HUB_CACHE hub_cache_env1 = constants.HUGGINGFACE_HUB_CACHE default_cache = constants.default_cache_path transformers_cache = hub.TRANSFORMERS_CACHE try: hub._is_offline_mode = True with tempfile.TemporaryDirectory() as tmpdir: LOG.info("Temporary cache dir %s", tmpdir) constants.HF_HUB_CACHE = tmpdir constants.HUGGINGFACE_HUB_CACHE = tmpdir constants.default_cache_path = tmpdir hub.TRANSFORMERS_CACHE = tmpdir # First offline load should fail try: AutoModelForImageClassification.from_pretrained( TINY_IMAGE_CLASSIF, revision="main", use_auth_token=None ) except OSError: LOG.info("Loading model %s in offline mode failed as expected", TINY_IMAGE_CLASSIF) else: self.fail("Loading model {} in offline mode should fail".format(TINY_IMAGE_CLASSIF)) # Download model -> Huggingface Hub not concerned by our offline mode LOG.info("Downloading %s for offline tests", TINY_IMAGE_CLASSIF) hub_api = HfApi() local_dir = hub_api.snapshot_download(TINY_IMAGE_CLASSIF, cache_dir=tmpdir) LOG.info("Model %s downloaded in %s", TINY_IMAGE_CLASSIF, local_dir) AutoModelForImageClassification.from_pretrained( TINY_IMAGE_CLASSIF, revision="main", use_auth_token=None ) finally: # Tear down: reset env as it was before calling this test hub._is_offline_mode = offlfine_env constants.HF_HUB_CACHE = hub_cache_env constants.HUGGINGFACE_HUB_CACHE = hub_cache_env1 constants.default_cache_path = default_cache hub.TRANSFORMERS_CACHE = transformers_cache def test_local_files_only(self): # Ugly setup with monkeypatches, amending env vars here is too late as libs have already been imported from huggingface_hub import constants from transformers.utils import hub hub_cache_env = constants.HF_HUB_CACHE hub_cache_env1 = constants.HUGGINGFACE_HUB_CACHE default_cache = constants.default_cache_path transformers_cache = hub.TRANSFORMERS_CACHE try: with tempfile.TemporaryDirectory() as tmpdir: 
LOG.info("Temporary cache dir %s", tmpdir) constants.HF_HUB_CACHE = tmpdir constants.HUGGINGFACE_HUB_CACHE = tmpdir constants.default_cache_path = tmpdir hub.TRANSFORMERS_CACHE = tmpdir try: AutoModelForImageClassification.from_pretrained( TINY_IMAGE_CLASSIF, revision="main", use_auth_token=None, local_files_only=True ) except OSError: LOG.info("Loading model %s in offline mode failed as expected", TINY_IMAGE_CLASSIF) else: self.fail("Loading model {} in offline mode should fail".format(TINY_IMAGE_CLASSIF)) LOG.info("Downloading %s for offline tests", TINY_IMAGE_CLASSIF) hub_api = HfApi() local_dir = hub_api.snapshot_download(TINY_IMAGE_CLASSIF, cache_dir=tmpdir) LOG.info("Model %s downloaded in %s", TINY_IMAGE_CLASSIF, local_dir) AutoModelForImageClassification.from_pretrained( TINY_IMAGE_CLASSIF, revision="main", use_auth_token=None, local_files_only=True ) finally: # Tear down: reset env as it was before calling this test constants.HF_HUB_CACHE = hub_cache_env constants.HUGGINGFACE_HUB_CACHE = hub_cache_env1 constants.default_cache_path = default_cache hub.TRANSFORMERS_CACHE = transformers_cache if is_flax_available(): from transformers import FlaxBertModel if is_tf_available(): from transformers import TFBertModel TINY_T5 = "patrickvonplaten/t5-tiny-random" TINY_BERT_FOR_TOKEN_CLASSIFICATION = "hf-internal-testing/tiny-bert-for-token-classification" TINY_MISTRAL = "hf-internal-testing/tiny-random-MistralForCausalLM" TINY_IMAGE_CLASSIF = "hf-internal-testing/tiny-random-SiglipForImageClassification" TINY_LLAVA = "hf-internal-testing/tiny-random-LlavaForConditionalGeneration" LOG = logging.get_logger(__name__) def check_models_equal(model1, model2): models_are_equal = True for model1_p, model2_p in zip(model1.parameters(), model2.parameters()): if model1_p.data.ne(model2_p.data).sum() > 0: models_are_equal = False return models_are_equal @require_torch class ModelUtilsTest(TestCasePlus): def setUp(self): self.old_dtype = torch.get_default_dtype() super().setUp() def tearDown(self): torch.set_default_dtype(self.old_dtype) super().tearDown() @slow def test_model_from_pretrained(self): model_name = "google-bert/bert-base-uncased" config = BertConfig.from_pretrained(model_name) self.assertIsNotNone(config) self.assertIsInstance(config, PretrainedConfig) model = BertModel.from_pretrained(model_name) model, loading_info = BertModel.from_pretrained(model_name, output_loading_info=True) self.assertIsNotNone(model) self.assertIsInstance(model, PreTrainedModel) self.assertEqual(len(loading_info["missing_keys"]), 0) self.assertEqual(len(loading_info["unexpected_keys"]), 8) self.assertEqual(len(loading_info["mismatched_keys"]), 0) self.assertEqual(len(loading_info["error_msgs"]), 0) config = BertConfig.from_pretrained(model_name, output_attentions=True, output_hidden_states=True) # Not sure this is the intended behavior. 
TODO fix Lysandre & Thom config.name_or_path = model_name model = BertModel.from_pretrained(model_name, output_attentions=True, output_hidden_states=True) self.assertEqual(model.config.output_hidden_states, True) self.assertEqual(model.config, config) def test_model_from_pretrained_subfolder(self): config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert") model = BertModel(config) subfolder = "bert" with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(tmp_dir, subfolder)) with self.assertRaises(OSError): _ = BertModel.from_pretrained(tmp_dir) model_loaded = BertModel.from_pretrained(tmp_dir, subfolder=subfolder) self.assertTrue(check_models_equal(model, model_loaded)) def test_model_manually_shared_disjointed_tensors_optimum(self): config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert") model = BertModel(config) # Let's fuse qkv attn = model.encoder.layer[0].attention.self q = attn.query.weight k = attn.key.weight v = attn.value.weight # Force some shared storage qkv = torch.stack([q, k, v], dim=0) attn.query.weight = torch.nn.Parameter(qkv[0]) attn.key.weight = torch.nn.Parameter(qkv[1]) attn.value.weight = torch.nn.Parameter(qkv[2]) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir) model_loaded = BertModel.from_pretrained(tmp_dir) self.assertTrue(check_models_equal(model, model_loaded)) def test_model_from_pretrained_subfolder_sharded(self): config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert") model = BertModel(config) subfolder = "bert" with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB") with self.assertRaises(OSError): _ = BertModel.from_pretrained(tmp_dir) model_loaded = BertModel.from_pretrained(tmp_dir, subfolder=subfolder) self.assertTrue(check_models_equal(model, model_loaded)) def test_model_from_pretrained_hub_subfolder(self): subfolder = "bert" model_id = "hf-internal-testing/tiny-random-bert-subfolder" with self.assertRaises(OSError): _ = BertModel.from_pretrained(model_id) model = BertModel.from_pretrained(model_id, subfolder=subfolder) self.assertIsNotNone(model) def test_model_from_pretrained_hub_subfolder_sharded(self): subfolder = "bert" model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder" with self.assertRaises(OSError): _ = BertModel.from_pretrained(model_id) model = BertModel.from_pretrained(model_id, subfolder=subfolder) self.assertIsNotNone(model) def test_model_from_pretrained_with_different_pretrained_model_name(self): model = T5ForConditionalGeneration.from_pretrained(TINY_T5) self.assertIsNotNone(model) logger = logging.get_logger("transformers.configuration_utils") with LoggingLevel(logging.WARNING): with CaptureLogger(logger) as cl: BertModel.from_pretrained(TINY_T5) self.assertTrue("You are using a model of type t5 to instantiate a model of type bert" in cl.out) @require_accelerate def test_model_from_pretrained_with_none_quantization_config(self): # Needs a device_map for to enter the low_cpu_mem branch. We also load AutoModelForSequenceClassification # deliberately to enter the missing keys branch. model = AutoModelForSequenceClassification.from_pretrained( TINY_MISTRAL, device_map="auto", quantization_config=None ) self.assertIsNotNone(model) def test_model_from_config_torch_dtype(self): # test that the model can be instantiated with dtype of user's choice - as long as it's a # float dtype. 
To make it happen config.torch_dtype needs to be set before instantiating the # model from the config object. config = T5Config.from_pretrained(TINY_T5) model = AutoModel.from_config(config) # XXX: isn't supported # model = T5ForConditionalGeneration.from_config(config) self.assertEqual(model.dtype, torch.float32) model = AutoModel.from_config(config, torch_dtype=torch.float16) self.assertEqual(model.dtype, torch.float16) # torch.set_default_dtype() supports only float dtypes, so will fail with non-float type with self.assertRaises(ValueError): model = AutoModel.from_config(config, torch_dtype=torch.int64) def test_model_from_config_torch_dtype_str(self): # test that from_pretrained works with torch_dtype being strings like "float32" for PyTorch backend model = AutoModel.from_pretrained(TINY_T5, torch_dtype="float32") self.assertEqual(model.dtype, torch.float32) model = AutoModel.from_pretrained(TINY_T5, torch_dtype="float16") self.assertEqual(model.dtype, torch.float16) # torch.set_default_dtype() supports only float dtypes, so will fail with non-float type with self.assertRaises(ValueError): model = AutoModel.from_pretrained(TINY_T5, torch_dtype="int64") def test_model_from_config_torch_dtype_composite(self): """ Test that from_pretrained works with torch_dtype being as a dict per each sub-config in composite config Tiny-Llava has saved auto dtype as `torch.float32` for all modules. """ # should be able to set torch_dtype as a simple string and the model loads it correctly model = LlavaForConditionalGeneration.from_pretrained(TINY_LLAVA, torch_dtype="float32") self.assertEqual(model.language_model.dtype, torch.float32) self.assertEqual(model.vision_tower.dtype, torch.float32) model = LlavaForConditionalGeneration.from_pretrained(TINY_LLAVA, torch_dtype=torch.float16) self.assertEqual(model.language_model.dtype, torch.float16) self.assertEqual(model.vision_tower.dtype, torch.float16) # should be able to set torch_dtype as a dict for each sub-config model = LlavaForConditionalGeneration.from_pretrained( TINY_LLAVA, torch_dtype={"text_config": "float32", "vision_config": "float16", "": "bfloat16"} ) self.assertEqual(model.language_model.dtype, torch.float32) self.assertEqual(model.vision_tower.dtype, torch.float16) self.assertEqual(model.multi_modal_projector.linear_1.weight.dtype, torch.bfloat16) # should be able to set the values as torch.dtype (not str) model = LlavaForConditionalGeneration.from_pretrained( TINY_LLAVA, torch_dtype={"text_config": torch.float32, "vision_config": torch.float16, "": torch.bfloat16} ) self.assertEqual(model.language_model.dtype, torch.float32) self.assertEqual(model.vision_tower.dtype, torch.float16) self.assertEqual(model.multi_modal_projector.linear_1.weight.dtype, torch.bfloat16) # should be able to set the values in configs directly and pass it to `from_pretrained` config = copy.deepcopy(model.config) config.text_config.torch_dtype = torch.float32 config.vision_config.torch_dtype = torch.bfloat16 config.torch_dtype = torch.float16 model = LlavaForConditionalGeneration.from_pretrained(TINY_LLAVA, config=config, torch_dtype="auto") self.assertEqual(model.language_model.dtype, torch.float32) self.assertEqual(model.vision_tower.dtype, torch.bfloat16) self.assertEqual(model.multi_modal_projector.linear_1.weight.dtype, torch.float16) # but if the model has `_keep_in_fp32_modules` then those modules should be in fp32 no matter what LlavaForConditionalGeneration._keep_in_fp32_modules = ["multi_modal_projector"] model = 
LlavaForConditionalGeneration.from_pretrained(TINY_LLAVA, config=config, torch_dtype="auto") self.assertEqual(model.language_model.dtype, torch.float32) self.assertEqual(model.vision_tower.dtype, torch.bfloat16) self.assertEqual(model.multi_modal_projector.linear_1.weight.dtype, torch.float32) # torch.set_default_dtype() supports only float dtypes, so will fail with non-float type with self.assertRaises(ValueError): model = LlavaForConditionalGeneration.from_pretrained(TINY_LLAVA, torch_dtype="int64") model = LlavaForConditionalGeneration.from_pretrained( TINY_LLAVA, torch_dtype={"text_config": "float32", "vision_config": "int64", "": "float16"} ) @require_torch def test_model_from_pretrained_meta_device(self): def is_on_meta(model_id, dtype): with torch.device("meta"): model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=dtype) return all(value.device.type == "meta" for value in model.state_dict().values()) model_ids = ("fxmarty/tiny-llama-fast-tokenizer", "fxmarty/small-llama-testing") dtypes = (None, "auto", torch.float16) for model_id, dtype in itertools.product(model_ids, dtypes): self.assertTrue(is_on_meta(model_id, dtype)) def test_model_from_pretrained_torch_dtype(self): # test that the model can be instantiated with dtype of either # 1. explicit from_pretrained's torch_dtype argument # 2. via autodiscovery by looking at model weights (torch_dtype="auto") # so if a model.half() was saved, we want it to be instantiated as such. # # test an explicit model class, but also AutoModel separately as the latter goes through a different code path model_path = self.get_auto_remove_tmp_dir() # baseline - we know TINY_T5 is fp32 model model = T5ForConditionalGeneration.from_pretrained(TINY_T5) self.assertEqual(model.dtype, torch.float32) def remove_torch_dtype(model_path): file = f"{model_path}/config.json" with open(file, "r", encoding="utf-8") as f: s = json.load(f) s.pop("torch_dtype") with open(file, "w", encoding="utf-8") as f: json.dump(s, f) # test the default fp32 save_pretrained => from_pretrained cycle model.save_pretrained(model_path) model = T5ForConditionalGeneration.from_pretrained(model_path) self.assertEqual(model.dtype, torch.float32) # 1. test torch_dtype="auto" via `config.torch_dtype` model = T5ForConditionalGeneration.from_pretrained(model_path, torch_dtype="auto") self.assertEqual(model.dtype, torch.float32) # 2. test torch_dtype="auto" via auto-derivation # now remove the torch_dtype entry from config.json and try "auto" again which should # perform auto-derivation from weights remove_torch_dtype(model_path) model = T5ForConditionalGeneration.from_pretrained(model_path, torch_dtype="auto") self.assertEqual(model.dtype, torch.float32) # test forced loading in fp16 (even though the weights are in fp32) model = T5ForConditionalGeneration.from_pretrained(model_path, torch_dtype=torch.float16) self.assertEqual(model.dtype, torch.float16) # test fp16 save_pretrained, loaded with auto-detection model = model.half() model.save_pretrained(model_path) # 1. test torch_dtype="auto" via `config.torch_dtype` model = T5ForConditionalGeneration.from_pretrained(model_path, torch_dtype="auto") self.assertEqual(model.config.torch_dtype, torch.float16) self.assertEqual(model.dtype, torch.float16) # tests `config.torch_dtype` saving with open(f"{model_path}/config.json") as f: config_dict = json.load(f) self.assertEqual(config_dict["torch_dtype"], "float16") # 2. 
test torch_dtype="auto" via auto-derivation # now same with using config info remove_torch_dtype(model_path) model = T5ForConditionalGeneration.from_pretrained(model_path, torch_dtype="auto") self.assertEqual(model.dtype, torch.float16) # 3. now retest that AutoModel behaves the same wrt torch_dtype="auto" as T5ForConditionalGeneration model = AutoModel.from_pretrained(model_path, torch_dtype="auto") self.assertEqual(model.dtype, torch.float16) # test fp16 save_pretrained, loaded with the explicit fp16 model = T5ForConditionalGeneration.from_pretrained(model_path, torch_dtype=torch.float16) self.assertEqual(model.dtype, torch.float16) # test AutoModel separately as it goes through a different path # test auto-detection - as currently TINY_T5 doesn't have torch_dtype entry model = AutoModel.from_pretrained(TINY_T5, torch_dtype="auto") # test that the config object didn't get polluted with torch_dtype="auto" # there was a bug that after this call we ended up with config.torch_dtype=="auto" self.assertNotEqual(model.config.torch_dtype, "auto") # now test the outcome self.assertEqual(model.dtype, torch.float32) model = AutoModel.from_pretrained(TINY_T5, torch_dtype=torch.float16) self.assertEqual(model.dtype, torch.float16) # test model whose first param is not of a floating type, but int model = AutoModel.from_pretrained(TINY_BERT_FOR_TOKEN_CLASSIFICATION, torch_dtype="auto") self.assertEqual(model.dtype, torch.float32) def test_model_from_pretrained_attn_implementation(self): # test that the model can be instantiated with attn_implementation of either # 1. explicit from_pretrained's attn_implementation argument # 2. explicit from_pretrained's attn_implementation argument with a config argument attn_implementation_available = ["eager"] if is_torch_sdpa_available(): attn_implementation_available.append("sdpa") if is_flash_attn_2_available(): attn_implementation_available.append("flash_attention_2") for requested_attn_implementation in attn_implementation_available: model = AutoModelForCausalLM.from_pretrained( TINY_MISTRAL, attn_implementation=requested_attn_implementation ) self.assertEqual(model.config._attn_implementation, requested_attn_implementation) config = AutoConfig.from_pretrained(TINY_MISTRAL) model = AutoModelForCausalLM.from_pretrained( TINY_MISTRAL, config=config, attn_implementation=requested_attn_implementation ) self.assertEqual(model.config._attn_implementation, requested_attn_implementation) def test_model_from_config_attn_implementation(self): # test that the model can be instantiated with attn_implementation of either # 1. config created with explicit attn_implementatation and from_config # 2. explicit from_config's attn_implementation argument with a config argument # 3. 
config created with explicit attn_implementatation and from_config overriding with explicit attn_implementation argument attn_implementation_available = ["eager"] if is_torch_sdpa_available(): attn_implementation_available.append("sdpa") if is_flash_attn_2_available(): attn_implementation_available.append("flash_attention_2") for requested_attn_implementation in attn_implementation_available: config = AutoConfig.from_pretrained(TINY_MISTRAL, attn_implementation=requested_attn_implementation) # Ensure the config was set correctly self.assertEqual(config._attn_implementation, requested_attn_implementation) self.assertEqual(config._attn_implementation_internal, requested_attn_implementation) model = AutoModelForCausalLM.from_config(config) self.assertEqual(model.config._attn_implementation, requested_attn_implementation) config = AutoConfig.from_pretrained(TINY_MISTRAL) # When the config is not set, the default is "eager" self.assertEqual(config._attn_implementation, "eager") self.assertEqual(config._attn_implementation_internal, None) model = AutoModelForCausalLM.from_config(config=config, attn_implementation=requested_attn_implementation) self.assertEqual(model.config._attn_implementation, requested_attn_implementation) # Set a nonsense attn_implementation in the config, which should be overridden by the explicit argument config = AutoConfig.from_pretrained(TINY_MISTRAL, attn_implementation="foo-bar-baz") self.assertEqual(config._attn_implementation, "foo-bar-baz") self.assertEqual(config._attn_implementation_internal, "foo-bar-baz") model = AutoModelForCausalLM.from_config(config=config, attn_implementation=requested_attn_implementation) self.assertEqual(model.config._attn_implementation, requested_attn_implementation) def test_torch_dtype_byte_sizes(self): torch_dtypes_and_bytes = [ (torch.double, 8), (torch.float64, 8), (torch.float, 4), (torch.float32, 4), (torch.half, 2), (torch.float16, 2), (torch.bfloat16, 2), (torch.long, 8), (torch.int64, 8), (torch.int, 4), (torch.int32, 4), (torch.short, 2), (torch.int16, 2), (torch.uint8, 1), (torch.int8, 1), (torch.float8_e4m3fn, 1), (torch.float8_e5m2, 1), (torch.bool, 0.125), ] for torch_dtype, bytes_per_element in torch_dtypes_and_bytes: self.assertEqual(dtype_byte_size(torch_dtype), bytes_per_element) def test_no_super_init_config_and_model(self): config = NoSuperInitConfig(attribute=32) model = NoSuperInitModel(config) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir) new_model = NoSuperInitModel.from_pretrained(tmp_dir) for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) def test_checkpoint_sharding_local_bin(self): model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") with tempfile.TemporaryDirectory() as tmp_dir: # We use the same folder for various sizes to make sure a new save erases the old checkpoint. 
for max_size in ["50kB", "100kB", "200kB"]: model.save_pretrained(tmp_dir, max_shard_size=max_size, safe_serialization=False) # Get each shard file and its size shard_to_size = {} for shard in os.listdir(tmp_dir): if shard.endswith(".bin"): shard_file = os.path.join(tmp_dir, shard) shard_to_size[shard_file] = os.path.getsize(shard_file) index_file = os.path.join(tmp_dir, WEIGHTS_INDEX_NAME) # Check there is an index but no regular weight file self.assertTrue(os.path.isfile(index_file)) self.assertFalse(os.path.isfile(os.path.join(tmp_dir, WEIGHTS_NAME))) # Check a file is bigger than max_size only when it has a single weight for shard_file, size in shard_to_size.items(): max_size_int = int(max_size[:-2]) * 10**3 # Note: pickle adds some junk so the weight of the file can end up being slightly bigger than # the size asked for (since we count parameters) if size >= max_size_int + 50000: state_dict = torch.load(shard_file) self.assertEqual(len(state_dict), 1) # Check the index and the shard files found match with open(index_file, "r", encoding="utf-8") as f: index = json.loads(f.read()) all_shards = set(index["weight_map"].values()) shards_found = {f for f in os.listdir(tmp_dir) if f.endswith(".bin")} self.assertSetEqual(all_shards, shards_found) # Finally, check the model can be reloaded new_model = BertModel.from_pretrained(tmp_dir) for p1, p2 in zip(model.parameters(), new_model.parameters()): torch.testing.assert_close(p1, p2) def test_checkpoint_sharding_from_hub(self): model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert-sharded") # the model above is the same as the model below, just a sharded version. ref_model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") for p1, p2 in zip(model.parameters(), ref_model.parameters()): torch.testing.assert_close(p1, p2) def test_checkpoint_variant_local_bin(self): model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, variant="v2", safe_serialization=False) weights_name = ".".join(WEIGHTS_NAME.split(".")[:-1] + ["v2"] + ["bin"]) weights_file = os.path.join(tmp_dir, weights_name) self.assertTrue(os.path.isfile(weights_file)) self.assertFalse(os.path.isfile(os.path.join(tmp_dir, WEIGHTS_NAME))) with self.assertRaises(EnvironmentError): _ = BertModel.from_pretrained(tmp_dir) new_model = BertModel.from_pretrained(tmp_dir, variant="v2") for p1, p2 in zip(model.parameters(), new_model.parameters()): torch.testing.assert_close(p1, p2) def test_checkpoint_variant_local_sharded_bin(self): model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, variant="v2", max_shard_size="50kB", safe_serialization=False) weights_index_name = ".".join(WEIGHTS_INDEX_NAME.split(".")[:-1] + ["v2"] + ["json"]) weights_index_file = os.path.join(tmp_dir, weights_index_name) self.assertTrue(os.path.isfile(weights_index_file)) self.assertFalse(os.path.isfile(os.path.join(tmp_dir, WEIGHTS_INDEX_NAME))) for i in range(1, 5): weights_name = ".".join(WEIGHTS_NAME.split(".")[:-1] + [f"v2-0000{i}-of-00005"] + ["bin"]) weights_name_file = os.path.join(tmp_dir, weights_name) self.assertTrue(os.path.isfile(weights_name_file)) with self.assertRaises(EnvironmentError): _ = BertModel.from_pretrained(tmp_dir) new_model = BertModel.from_pretrained(tmp_dir, variant="v2") for p1, p2 in zip(model.parameters(), new_model.parameters()): torch.testing.assert_close(p1, p2) 
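    # The variant checkpoint tests in this class rely on the naming scheme used by
    # `save_pretrained(..., variant="v2")`: the variant tag is inserted just before the file
    # extension, so "pytorch_model.bin" becomes "pytorch_model.v2.bin", "model.safetensors"
    # becomes "model.v2.safetensors", and a sharded index such as "pytorch_model.bin.index.json"
    # becomes "pytorch_model.bin.index.v2.json"; this is what the `".".join(...)` expressions
    # in these tests reconstruct.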
@require_safetensors def test_checkpoint_variant_local_safe(self): model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, variant="v2", safe_serialization=True) weights_name = ".".join(SAFE_WEIGHTS_NAME.split(".")[:-1] + ["v2"] + ["safetensors"]) weights_file = os.path.join(tmp_dir, weights_name) self.assertTrue(os.path.isfile(weights_file)) self.assertFalse(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_NAME))) with self.assertRaises(EnvironmentError): _ = BertModel.from_pretrained(tmp_dir) new_model = BertModel.from_pretrained(tmp_dir, variant="v2") for p1, p2 in zip(model.parameters(), new_model.parameters()): torch.testing.assert_close(p1, p2) @require_safetensors def test_checkpoint_variant_local_sharded_safe(self): model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, variant="v2", max_shard_size="50kB", safe_serialization=True) weights_index_name = ".".join(SAFE_WEIGHTS_INDEX_NAME.split(".")[:-1] + ["v2"] + ["json"]) weights_index_file = os.path.join(tmp_dir, weights_index_name) self.assertTrue(os.path.isfile(weights_index_file)) self.assertFalse(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_INDEX_NAME))) for i in range(1, 5): weights_name = ".".join(SAFE_WEIGHTS_NAME.split(".")[:-1] + [f"v2-0000{i}-of-00005"] + ["safetensors"]) weights_name_file = os.path.join(tmp_dir, weights_name) self.assertTrue(os.path.isfile(weights_name_file)) with self.assertRaises(EnvironmentError): _ = BertModel.from_pretrained(tmp_dir) new_model = BertModel.from_pretrained(tmp_dir, variant="v2") for p1, p2 in zip(model.parameters(), new_model.parameters()): torch.testing.assert_close(p1, p2) def test_checkpoint_loading_only_safetensors_available(self): # Test that the loading behaviour is as expected when only safetensor checkpoints are available # - We can load the model with use_safetensors=True # - We can load the model without specifying use_safetensors i.e. 
we search for the available checkpoint, # preferring safetensors # - We cannot load the model with use_safetensors=False model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, max_shard_size="50kB", safe_serialization=True) weights_index_name = ".".join(SAFE_WEIGHTS_INDEX_NAME.split(".")[:-1] + ["json"]) weights_index_file = os.path.join(tmp_dir, weights_index_name) self.assertTrue(os.path.isfile(weights_index_file)) for i in range(1, 5): weights_name = f"model-0000{i}-of-00005" + ".safetensors" weights_name_file = os.path.join(tmp_dir, weights_name) self.assertTrue(os.path.isfile(weights_name_file)) # Setting use_safetensors=False should raise an error as the checkpoint was saved with safetensors=True with self.assertRaises(OSError): _ = BertModel.from_pretrained(tmp_dir, use_safetensors=False) # We can load the model with use_safetensors=True new_model = BertModel.from_pretrained(tmp_dir, use_safetensors=True) # We can load the model without specifying use_safetensors new_model = BertModel.from_pretrained(tmp_dir) for p1, p2 in zip(model.parameters(), new_model.parameters()): torch.testing.assert_close(p1, p2) def test_checkpoint_loading_only_pytorch_bin_available(self): # Test that the loading behaviour is as expected when only pytorch checkpoints are available # - We can load the model with use_safetensors=False # - We can load the model without specifying use_safetensors i.e. we search for the available checkpoint, # preferring safetensors but falling back to pytorch # - We cannot load the model with use_safetensors=True model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, max_shard_size="50kB", safe_serialization=False) weights_index_name = ".".join(WEIGHTS_INDEX_NAME.split(".")[:-1] + ["json"]) weights_index_file = os.path.join(tmp_dir, weights_index_name) self.assertTrue(os.path.isfile(weights_index_file)) for i in range(1, 5): weights_name = WEIGHTS_NAME.split(".")[0].split("_")[0] + f"_model-0000{i}-of-00005" + ".bin" weights_name_file = os.path.join(tmp_dir, weights_name) self.assertTrue(os.path.isfile(weights_name_file)) # Setting use_safetensors=True should raise an error as the checkpoint was saved with safetensors=False with self.assertRaises(OSError): _ = BertModel.from_pretrained(tmp_dir, use_safetensors=True) # We can load the model with use_safetensors=False new_model = BertModel.from_pretrained(tmp_dir, use_safetensors=False) # We can load the model without specifying use_safetensors new_model = BertModel.from_pretrained(tmp_dir) for p1, p2 in zip(model.parameters(), new_model.parameters()): torch.testing.assert_close(p1, p2) def test_checkpoint_variant_hub(self): with tempfile.TemporaryDirectory() as tmp_dir: with self.assertRaises(EnvironmentError): _ = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert-variant", cache_dir=tmp_dir) model = BertModel.from_pretrained( "hf-internal-testing/tiny-random-bert-variant", cache_dir=tmp_dir, variant="v2" ) self.assertIsNotNone(model) def test_checkpoint_variant_hub_sharded(self): with tempfile.TemporaryDirectory() as tmp_dir: with self.assertRaises(EnvironmentError): _ = BertModel.from_pretrained( "hf-internal-testing/tiny-random-bert-variant-sharded", cache_dir=tmp_dir ) model = BertModel.from_pretrained( "hf-internal-testing/tiny-random-bert-variant-sharded", cache_dir=tmp_dir, variant="v2" ) self.assertIsNotNone(model) 
@require_safetensors def test_checkpoint_variant_hub_safe(self): with tempfile.TemporaryDirectory() as tmp_dir: with self.assertRaises(EnvironmentError): _ = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert-variant-safe", cache_dir=tmp_dir) model = BertModel.from_pretrained( "hf-internal-testing/tiny-random-bert-variant-safe", cache_dir=tmp_dir, variant="v2" ) self.assertIsNotNone(model) @require_safetensors def test_checkpoint_variant_hub_sharded_safe(self): with tempfile.TemporaryDirectory() as tmp_dir: with self.assertRaises(EnvironmentError): _ = BertModel.from_pretrained( "hf-internal-testing/tiny-random-bert-variant-sharded-safe", cache_dir=tmp_dir ) model = BertModel.from_pretrained( "hf-internal-testing/tiny-random-bert-variant-sharded-safe", cache_dir=tmp_dir, variant="v2" ) self.assertIsNotNone(model) def test_checkpoint_variant_save_load_bin(self): with tempfile.TemporaryDirectory() as tmp_dir: model = BertModel.from_pretrained( "hf-internal-testing/tiny-random-bert-variant", cache_dir=tmp_dir, variant="v2" ) weights_name = ".".join(WEIGHTS_NAME.split(".")[:-1] + ["v2"] + ["bin"]) model.save_pretrained(tmp_dir, variant="v2", safe_serialization=False) # saving will create a variant checkpoint self.assertTrue(os.path.isfile(os.path.join(tmp_dir, weights_name))) model.save_pretrained(tmp_dir, safe_serialization=False) # saving shouldn't delete variant checkpoints weights_name = ".".join(WEIGHTS_NAME.split(".")[:-1] + ["v2"] + ["bin"]) self.assertTrue(os.path.isfile(os.path.join(tmp_dir, weights_name))) # there should be a normal checkpoint self.assertTrue(os.path.isfile(os.path.join(tmp_dir, WEIGHTS_NAME))) self.assertIsNotNone(model) @require_accelerate @mark.accelerate_tests def test_from_pretrained_low_cpu_mem_usage_functional(self): # test that we can use `from_pretrained(..., low_cpu_mem_usage=True)` with normal and # sharded models mnames = [ "hf-internal-testing/tiny-random-bert-sharded", "hf-internal-testing/tiny-random-bert", ] for mname in mnames: _ = BertModel.from_pretrained(mname, low_cpu_mem_usage=True) @require_usr_bin_time @require_accelerate @mark.accelerate_tests def test_from_pretrained_low_cpu_mem_usage_equal(self): # Before this would test that `from_pretrained(..., low_cpu_mem_usage=True)` uses less cpu memory than default # Now though these should be around the same. 
# TODO: Look for good bounds to check that their timings are near the same mname = "hf-internal-testing/tiny-random-bert" preamble = "from transformers import AutoModel" one_liner_str = f'{preamble}; AutoModel.from_pretrained("{mname}", low_cpu_mem_usage=False)' # Save this output as `max_rss_normal` if testing memory results max_rss_normal = self.python_one_liner_max_rss(one_liner_str) # print(f"{max_rss_normal=}") one_liner_str = f'{preamble}; AutoModel.from_pretrained("{mname}", low_cpu_mem_usage=True)' # Save this output as `max_rss_low_mem` if testing memory results max_rss_low_mem = self.python_one_liner_max_rss(one_liner_str) # Should be within 2MBs of each other (overhead) self.assertAlmostEqual( max_rss_normal / 1024 / 1024, max_rss_low_mem / 1024 / 1024, delta=2, msg="using `low_cpu_mem_usage` should incur the same memory usage in both cases.", ) # if you want to compare things manually, let's first look at the size of the model in bytes # model = BertModel.from_pretrained(mname, low_cpu_mem_usage=False) # total_numel = sum(dict((p.data_ptr(), p.numel()) for p in model.parameters()).values()) # total_bytes = total_numel * 4 # 420MB # Now the diff_bytes should be very close to total_bytes, but the reports are inconsistent. # The easiest way to test this is to switch the model and torch.load to do all the work on # gpu - that way one can measure exactly the total and peak memory used. Perhaps once we add # functionality to load models directly on gpu, this test can be rewritten to use torch's # cuda memory tracking and then we should be able to do a much more precise test. @require_accelerate @mark.accelerate_tests @require_torch_multi_accelerator @slow def test_model_parallelism_gpt2(self): device_map = {"transformer.wte": 0, "transformer.wpe": 0, "lm_head": 0, "transformer.ln_f": 1} for i in range(12): device_map[f"transformer.h.{i}"] = 0 if i <= 5 else 1 model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2", device_map=device_map) tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2") inputs = tokenizer("Hello, my name is", return_tensors="pt") output = model.generate(inputs["input_ids"].to(f"{torch_device}:0")) text_output = tokenizer.decode(output[0].tolist()) self.assertEqual(text_output, "Hello, my name is John. I'm a writer, and I'm a writer. 
I'm") @require_accelerate @mark.accelerate_tests @require_torch_accelerator def test_from_pretrained_disk_offload_task_model(self): model = AutoModel.from_pretrained("hf-internal-testing/tiny-random-gpt2") device_map = { "transformer.wte": f"{torch_device}:0", "transformer.wpe": f"{torch_device}:0", "transformer.h.0": "cpu", "transformer.h.1": "cpu", "transformer.h.2": "cpu", "transformer.h.3": "disk", "transformer.h.4": "disk", "transformer.ln_f": f"{torch_device}:0", "lm_head": f"{torch_device}:0", } with tempfile.TemporaryDirectory() as tmp_dir: inputs = torch.tensor([[1, 2, 3]]).to(f"{torch_device}:0") model.save_pretrained(tmp_dir) new_model = AutoModelForCausalLM.from_pretrained(tmp_dir).to(f"{torch_device}:0") outputs1 = new_model.to(f"{torch_device}:0")(inputs) offload_folder = os.path.join(tmp_dir, "offload") new_model_with_offload = AutoModelForCausalLM.from_pretrained( tmp_dir, device_map=device_map, offload_folder=offload_folder ) outputs2 = new_model_with_offload(inputs) torch.testing.assert_close(outputs1.logits.cpu(), outputs2.logits.cpu()) # With state dict temp offload new_model_with_offload = AutoModelForCausalLM.from_pretrained( tmp_dir, device_map=device_map, offload_folder=offload_folder, offload_state_dict=True, ) outputs2 = new_model_with_offload(inputs) torch.testing.assert_close(outputs1.logits.cpu(), outputs2.logits.cpu()) @require_accelerate @mark.accelerate_tests @require_torch_accelerator def test_from_pretrained_disk_offload_derived_to_base_model(self): derived_model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2") device_map = { "wte": f"{torch_device}:0", "wpe": f"{torch_device}:0", "h.0": "cpu", "h.1": "cpu", "h.2": "cpu", "h.3": "disk", "h.4": "disk", "ln_f": f"{torch_device}:0", } with tempfile.TemporaryDirectory() as tmp_dir: inputs = torch.tensor([[1, 2, 3]]).to(f"{torch_device}:0") derived_model.save_pretrained(tmp_dir, use_safetensors=True) base_model = AutoModel.from_pretrained(tmp_dir) outputs1 = base_model.to(f"{torch_device}:0")(inputs) # with disk offload offload_folder = os.path.join(tmp_dir, "offload") base_model_with_offload = AutoModel.from_pretrained( tmp_dir, device_map=device_map, offload_folder=offload_folder ) outputs2 = base_model_with_offload(inputs) torch.testing.assert_close(outputs1[0].cpu(), outputs2[0].cpu()) # With state dict temp offload new_model_with_offload = AutoModel.from_pretrained( tmp_dir, device_map=device_map, offload_folder=offload_folder, offload_state_dict=True, ) outputs2 = new_model_with_offload(inputs) torch.testing.assert_close(outputs1[0].cpu(), outputs2[0].cpu()) @slow @require_torch def test_from_pretrained_non_contiguous_checkpoint(self): # See: https://github.com/huggingface/transformers/pull/28414 # Tiny models on the Hub have contiguous weights, contrarily to google/owlvit model = OwlViTForObjectDetection.from_pretrained("fxmarty/owlvit-tiny-non-contiguous-weight") self.assertTrue(model.owlvit.visual_projection.weight.is_contiguous()) model = OwlViTForObjectDetection.from_pretrained( "fxmarty/owlvit-tiny-non-contiguous-weight", device_map="auto" ) self.assertTrue(model.owlvit.visual_projection.weight.is_contiguous()) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, safe_serialization=False) model.save_pretrained(tmp_dir, safe_serialization=True) def test_cached_files_are_used_when_internet_is_down(self): # A mock response for an HTTP head request to emulate server down response_mock = mock.Mock() response_mock.status_code = 500 
response_mock.headers = {} response_mock.raise_for_status.side_effect = HTTPError response_mock.json.return_value = {} # Download this model to make sure it's in the cache. _ = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch("requests.Session.request", return_value=response_mock) as mock_head: _ = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") # This check we did call the fake head request mock_head.assert_called() @require_accelerate @mark.accelerate_tests def test_save_model_with_device_map_cpu(self): model_id = "hf-internal-testing/tiny-random-gpt2" inputs = torch.tensor([[1, 2, 3]]) with tempfile.TemporaryDirectory() as tmp_dir: model = AutoModelForCausalLM.from_pretrained(model_id, device_map="cpu") output = model(inputs)[0] model.save_pretrained( tmp_dir, max_shard_size="200KB" ) # model is 1.6MB, max shard size is allocated to cpu by default saved_model = AutoModelForCausalLM.from_pretrained(tmp_dir, device_map="cpu") saved_model_output = saved_model(inputs)[0] torch.testing.assert_close(output, saved_model_output) @require_accelerate @mark.accelerate_tests @require_torch_accelerator def test_save_offloaded_model(self): device_map = { "transformer.wte": f"{torch_device}:0", "transformer.wpe": f"{torch_device}:0", "transformer.h.0": "cpu", "transformer.h.1": "cpu", "transformer.h.2": "cpu", "transformer.h.3": "disk", "transformer.h.4": "disk", "transformer.ln_f": f"{torch_device}:0", "lm_head": f"{torch_device}:0", } # check_models_equal requires onloaded tensors model_id = "hf-internal-testing/tiny-random-gpt2" onloaded_model = AutoModelForCausalLM.from_pretrained(model_id, device_map="cpu").to(f"{torch_device}:0") inputs = torch.tensor([[1, 2, 3]]).to(f"{torch_device}:0") output = onloaded_model(inputs)[0] with tempfile.TemporaryDirectory() as tmp_dir: offload_folder = os.path.join(tmp_dir, "offload") offloaded_model = AutoModelForCausalLM.from_pretrained( model_id, device_map=device_map, offload_folder=offload_folder ) presaved_output = offloaded_model(inputs)[0] offloaded_model.save_pretrained( tmp_dir, max_shard_size="200KB" ) # model is 1.6MB, max shard size is allocated to cpu by default saved_model = AutoModelForCausalLM.from_pretrained(tmp_dir, device_map=device_map) postsaved_output = saved_model(inputs)[0] torch.testing.assert_close(output, presaved_output, rtol=1e-4, atol=1e-4) torch.testing.assert_close(presaved_output, postsaved_output) @require_safetensors def test_use_safetensors(self): # Should not raise anymore AutoModel.from_pretrained("hf-internal-testing/tiny-random-RobertaModel", use_safetensors=True) # test that error if only safetensors is available with self.assertRaises(OSError) as env_error: BertModel.from_pretrained("hf-internal-testing/tiny-random-bert-safetensors", use_safetensors=False) self.assertTrue("does not appear to have a file named pytorch_model.bin" in str(env_error.exception)) # test that only safetensors if both available and use_safetensors=False with tempfile.TemporaryDirectory() as tmp_dir: CLIPTextModel.from_pretrained( "hf-internal-testing/diffusers-stable-diffusion-tiny-all", subfolder="text_encoder", use_safetensors=False, cache_dir=tmp_dir, ) all_downloaded_files = glob.glob(os.path.join(tmp_dir, "*", "snapshots", "*", "*", "*")) self.assertTrue(any(f.endswith("bin") for f in all_downloaded_files)) self.assertFalse(any(f.endswith("safetensors") for f in all_downloaded_files)) # test that no safetensors 
if both available and use_safetensors=True with tempfile.TemporaryDirectory() as tmp_dir: CLIPTextModel.from_pretrained( "hf-internal-testing/diffusers-stable-diffusion-tiny-all", subfolder="text_encoder", use_safetensors=True, cache_dir=tmp_dir, ) all_downloaded_files = glob.glob(os.path.join(tmp_dir, "*", "snapshots", "*", "*", "*")) self.assertTrue(any(f.endswith("safetensors") for f in all_downloaded_files)) self.assertFalse(any(f.endswith("bin") for f in all_downloaded_files)) # test no model file found when use_safetensors=None (default when safetensors package available) with self.assertRaises(OSError) as missing_model_file_error: BertModel.from_pretrained("hf-internal-testing/config-no-model") self.assertTrue( "does not appear to have a file named pytorch_model.bin, model.safetensors," in str(missing_model_file_error.exception) ) with self.assertRaises(OSError) as missing_model_file_error: with tempfile.TemporaryDirectory() as tmp_dir: with open(os.path.join(tmp_dir, "config.json"), "w") as f: f.write("{}") f.close() BertModel.from_pretrained(tmp_dir) self.assertTrue( "Error no file named pytorch_model.bin, model.safetensors" in str(missing_model_file_error.exception) ) @require_safetensors def test_safetensors_save_and_load(self): model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, safe_serialization=True) # No pytorch_model.bin file, only a model.safetensors self.assertTrue(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_NAME))) self.assertFalse(os.path.isfile(os.path.join(tmp_dir, WEIGHTS_NAME))) new_model = BertModel.from_pretrained(tmp_dir) # Check models are equal for p1, p2 in zip(model.parameters(), new_model.parameters()): torch.testing.assert_close(p1, p2) @require_safetensors def test_safetensors_load_from_hub(self): safetensors_model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert-safetensors") pytorch_model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") # Check models are equal for p1, p2 in zip(safetensors_model.parameters(), pytorch_model.parameters()): torch.testing.assert_close(p1, p2) @require_safetensors def test_safetensors_save_and_load_sharded(self): model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, safe_serialization=True, max_shard_size="100kB") # No pytorch_model.bin index file, only a model.safetensors index self.assertFalse(os.path.isfile(os.path.join(tmp_dir, WEIGHTS_INDEX_NAME))) self.assertTrue(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_INDEX_NAME))) # No regular weights file self.assertFalse(os.path.isfile(os.path.join(tmp_dir, WEIGHTS_NAME))) self.assertFalse(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_NAME))) new_model = BertModel.from_pretrained(tmp_dir) # Check models are equal for p1, p2 in zip(model.parameters(), new_model.parameters()): torch.testing.assert_close(p1, p2) @require_safetensors def test_safetensors_load_from_hub_sharded(self): safetensors_model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert-sharded-safetensors") pytorch_model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert-sharded") # Check models are equal for p1, p2 in zip(safetensors_model.parameters(), pytorch_model.parameters()): torch.testing.assert_close(p1, p2) def test_base_model_to_head_model_load(self): base_model = BaseModel(PretrainedConfig()) with tempfile.TemporaryDirectory() as 
tmp_dir: base_model.save_pretrained(tmp_dir, safe_serialization=False) # Can load a base model in a model with head model = ModelWithHead.from_pretrained(tmp_dir) for p1, p2 in zip(model.base.parameters(), base_model.parameters()): torch.testing.assert_close(p1, p2) # It doesn't work if the state dict has a mix of keys of the head and base without prefix though. base_state_dict = base_model.state_dict() head_state_dict = model.state_dict() base_state_dict["linear2.weight"] = head_state_dict["linear2.weight"] base_state_dict["linear2.bias"] = head_state_dict["linear2.bias"] safe_save_file(base_state_dict, os.path.join(tmp_dir, SAFE_WEIGHTS_NAME), metadata={"format": "pt"}) with self.assertRaisesRegex( ValueError, "The state dictionary of the model you are trying to load is corrupted." ): _ = ModelWithHead.from_pretrained(tmp_dir) def test_tied_weights_reload(self): # Base model = BaseModelWithTiedWeights(PretrainedConfig()) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir) new_model = BaseModelWithTiedWeights.from_pretrained(tmp_dir) self.assertIs(new_model.linear.weight, new_model.linear_2.weight) state_dict = model.state_dict() # Remove tied weight from state_dict -> model should load with no complain of missing keys del state_dict["linear_2.weight"] torch.save(state_dict, os.path.join(tmp_dir, WEIGHTS_NAME)) new_model, load_info = BaseModelWithTiedWeights.from_pretrained(tmp_dir, output_loading_info=True) self.assertListEqual(load_info["missing_keys"], []) self.assertIs(new_model.linear.weight, new_model.linear_2.weight) # With head model.save_pretrained(tmp_dir) new_model, load_info = ModelWithHeadAndTiedWeights.from_pretrained(tmp_dir, output_loading_info=True) self.assertIs(new_model.base.linear.weight, new_model.decoder.weight) # Should only complain about the missing bias self.assertListEqual(load_info["missing_keys"], ["decoder.bias"]) def test_unexpected_keys_warnings(self): model = ModelWithHead(PretrainedConfig()) logger = logging.get_logger("transformers.modeling_utils") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir) # Loading the model with a new class, we don't get a warning for unexpected weights, just an info with LoggingLevel(logging.WARNING): with CaptureLogger(logger) as cl: _, loading_info = BaseModel.from_pretrained(tmp_dir, output_loading_info=True) self.assertNotIn("were not used when initializing ModelWithHead", cl.out) self.assertEqual( set(loading_info["unexpected_keys"]), {"linear.weight", "linear.bias", "linear2.weight", "linear2.bias"}, ) # Loading the model with the same class, we do get a warning for unexpected weights state_dict = model.state_dict() state_dict["added_key"] = copy.deepcopy(state_dict["linear.weight"]) safe_save_file(state_dict, os.path.join(tmp_dir, SAFE_WEIGHTS_NAME), metadata={"format": "pt"}) with LoggingLevel(logging.WARNING): with CaptureLogger(logger) as cl: _, loading_info = ModelWithHead.from_pretrained(tmp_dir, output_loading_info=True) self.assertIn("were not used when initializing ModelWithHead: ['added_key']", cl.out) self.assertEqual(loading_info["unexpected_keys"], ["added_key"]) def test_warn_if_padding_and_no_attention_mask(self): logger = logging.get_logger("transformers.modeling_utils") with self.subTest("Ensure no warnings when pad_token_id is None."): logger.warning_once.cache_clear() with LoggingLevel(logging.WARNING): with CaptureLogger(logger) as cl: config_no_pad_token = PretrainedConfig() config_no_pad_token.pad_token_id = None model = 
ModelWithHead(config_no_pad_token) input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 0, 0]]) model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None) self.assertNotIn("We strongly recommend passing in an `attention_mask`", cl.out) with self.subTest("Ensure no warnings when there is an attention_mask."): logger.warning_once.cache_clear() with LoggingLevel(logging.WARNING): with CaptureLogger(logger) as cl: config = PretrainedConfig() config.pad_token_id = 0 model = ModelWithHead(config) input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 0, 0]]) attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0]]) model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) self.assertNotIn("We strongly recommend passing in an `attention_mask`", cl.out) with self.subTest("Ensure no warnings when there are no pad_token_ids in the input_ids."): logger.warning_once.cache_clear() with LoggingLevel(logging.WARNING): with CaptureLogger(logger) as cl: config = PretrainedConfig() config.pad_token_id = 0 model = ModelWithHead(config) input_ids = torch.tensor([[1, 345, 232, 328, 740, 140, 1695, 69, 6078, 2341, 25]]) model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None) self.assertNotIn("We strongly recommend passing in an `attention_mask`", cl.out) with self.subTest("Ensure a warning is shown when the input_ids start with a pad_token_id."): logger.warning_once.cache_clear() with LoggingLevel(logging.WARNING): with CaptureLogger(logger) as cl: config = PretrainedConfig() config.pad_token_id = 0 model = ModelWithHead(config) input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 432, 5232]]) model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None) self.assertIn("We strongly recommend passing in an `attention_mask`", cl.out) with self.subTest("Ensure a warning is shown when the input_ids end with a pad_token_id."): logger.warning_once.cache_clear() with LoggingLevel(logging.WARNING): with CaptureLogger(logger) as cl: config = PretrainedConfig() config.pad_token_id = 0 model = ModelWithHead(config) input_ids = torch.tensor([[432, 345, 232, 328, 740, 140, 1695, 69, 6078, 0, 0]]) model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None) self.assertIn("We strongly recommend passing in an `attention_mask`", cl.out) with self.subTest("Ensure that the warning is shown at most once."): logger.warning_once.cache_clear() with LoggingLevel(logging.WARNING): with CaptureLogger(logger) as cl: config = PretrainedConfig() config.pad_token_id = 0 model = ModelWithHead(config) input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 0, 0]]) model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None) model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None) self.assertEqual(cl.out.count("We strongly recommend passing in an `attention_mask`"), 1) with self.subTest("Ensure a different warning is shown when the pad_token_id is equal to the bos_token_id."): logger.warning_once.cache_clear() with LoggingLevel(logging.WARNING): with CaptureLogger(logger) as cl: config = PretrainedConfig() config.pad_token_id = 0 config.bos_token_id = config.pad_token_id model = ModelWithHead(config) input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 0, 0]]) model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None) self.assertIn("You may ignore this warning if your `pad_token_id`", cl.out) if not 
is_torchdynamo_available(): self.skipTest(reason="torchdynamo is not available") with self.subTest("Ensure that the warning code is skipped when compiling with torchdynamo."): logger.warning_once.cache_clear() from torch._dynamo import config, testing config = PretrainedConfig() config.pad_token_id = 0 model = ModelWithHead(config) input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 432, 5232]]) def f(input_ids): model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None) compile_counter = testing.CompileCounter() opt_fn = torch.compile(f, dynamic=True, backend=compile_counter) opt_fn(input_ids) self.assertEqual(compile_counter.frame_count, 0) @require_torch_accelerator @slow def test_pretrained_low_mem_new_config(self): # Checking for 1 model(the same one which was described in the issue) . model_ids = ["openai-community/gpt2"] for model_id in model_ids: model_config = AutoConfig.from_pretrained(pretrained_model_name_or_path=model_id) model_config.n_layer = 48 model_config.n_head = 25 model_config.n_embd = 1600 model = AutoModelForCausalLM.from_pretrained( pretrained_model_name_or_path=model_id, config=model_config, ignore_mismatched_sizes=True, torch_dtype=torch.float16, low_cpu_mem_usage=True, ) model_ref = AutoModelForCausalLM.from_pretrained(pretrained_model_name_or_path=model_id) self.assertEqual(model.__class__.__name__, model_ref.__class__.__name__) def test_generation_config_is_loaded_with_model(self): # Note: `hf-internal-testing/tiny-random-MistralForCausalLM` has a `generation_config.json` # containing `bos_token_id: 1` # 1. Load without further parameters model = AutoModelForCausalLM.from_pretrained(TINY_MISTRAL) self.assertEqual(model.generation_config.bos_token_id, 1) # 2. Load with `device_map` model = AutoModelForCausalLM.from_pretrained(TINY_MISTRAL, device_map="auto") self.assertEqual(model.generation_config.bos_token_id, 1) @require_safetensors def test_safetensors_torch_from_torch(self): model = BertModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, safe_serialization=True) new_model = BertModel.from_pretrained(tmp_dir) for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) @require_safetensors @require_flax def test_safetensors_torch_from_flax(self): hub_model = BertModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only") model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-flax-only") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, safe_serialization=True) new_model = BertModel.from_pretrained(tmp_dir) for p1, p2 in zip(hub_model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) @require_tf @require_safetensors def test_safetensors_torch_from_tf(self): hub_model = BertModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only") model = TFBertModel.from_pretrained("hf-internal-testing/tiny-bert-tf-only") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, safe_serialization=True) new_model = BertModel.from_pretrained(tmp_dir) for p1, p2 in zip(hub_model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) @require_safetensors def test_safetensors_torch_from_torch_sharded(self): model = BertModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, safe_serialization=True, 
max_shard_size="100kB") new_model = BertModel.from_pretrained(tmp_dir) for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) def test_modifying_model_config_gets_moved_to_generation_config(self): """ Calling `model.save_pretrained` should move the changes made to `generate` parameterization in the model config to the generation config. """ model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2") # Initially, the repetition penalty has its default value in `model.config`. The `model.generation_config` will # have the exact same default self.assertTrue(model.config.repetition_penalty == 1.0) self.assertTrue(model.generation_config.repetition_penalty == 1.0) # If the user attempts to save a custom generation parameter: model.config.repetition_penalty = 3.0 with warnings.catch_warnings(record=True) as warning_list: with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir) # 1 - That parameter will be removed from `model.config`. We don't want to use `model.config` to store # generative parameters, and the old default (1.0) would no longer relect the user's wishes. self.assertTrue(model.config.repetition_penalty is None) # 2 - That parameter will be set in `model.generation_config` instead. self.assertTrue(model.generation_config.repetition_penalty == 3.0) # 3 - The user will see a warning regarding the custom parameter that has been moved. self.assertTrue(len(warning_list) == 1) self.assertTrue("Moving the following attributes" in str(warning_list[0].message)) self.assertTrue("repetition_penalty" in str(warning_list[0].message)) @require_safetensors def test_model_from_pretrained_from_mlx(self): from safetensors import safe_open model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-mistral-mlx") self.assertIsNotNone(model) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, safe_serialization=True) with safe_open(os.path.join(tmp_dir, "model.safetensors"), framework="pt") as f: metadata = f.metadata() self.assertEqual(metadata.get("format"), "pt") new_model = AutoModelForCausalLM.from_pretrained(tmp_dir) input_ids = torch.randint(100, 1000, (1, 10)) with torch.no_grad(): outputs = model(input_ids) outputs_from_saved = new_model(input_ids) torch.testing.assert_close(outputs_from_saved["logits"], outputs["logits"]) def test_warning_for_beta_gamma_parameters(self): class TestGammaBetaNorm(torch.nn.Module): def __init__(self): super().__init__() self.gamma = torch.nn.Parameter(torch.ones(1)) self.beta = torch.nn.Parameter(torch.zeros(1)) def forward(self): return self.gamma.sum() + self.beta.sum() class TestModelGammaBeta(PreTrainedModel): def __init__(self, config): super().__init__(config) self.LayerNorm = TestGammaBetaNorm() self.post_init() def forward(self): return self.LayerNorm() logger = logging.get_logger("transformers.modeling_utils") config = PretrainedConfig() warning_msg_gamma = "`LayerNorm.gamma` -> `LayerNorm.weight`" warning_msg_beta = "`LayerNorm.beta` -> `LayerNorm.bias`" model = TestModelGammaBeta(config) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir) with LoggingLevel(logging.INFO): with CaptureLogger(logger) as cl1: _, loading_info = TestModelGammaBeta.from_pretrained( tmp_dir, config=config, output_loading_info=True ) missing_keys = loading_info["missing_keys"] unexpected_keys = loading_info["unexpected_keys"] self.assertIn("`TestModelGammaBeta`", cl1.out) self.assertIn(warning_msg_gamma, cl1.out) 
            self.assertIn(warning_msg_beta, cl1.out)
            self.assertIn("LayerNorm.gamma", missing_keys)
            self.assertIn("LayerNorm.weight", unexpected_keys)
            self.assertIn("LayerNorm.beta", missing_keys)
            self.assertIn("LayerNorm.bias", unexpected_keys)

    def test_isin_mps_friendly(self):
        """tests that our custom `isin_mps_friendly` matches `torch.isin`"""
        random_ids = torch.randint(0, 100, (100,))

        # We can match against an integer
        random_test_integer = torch.randint(0, 100, (1,)).item()
        self.assertTrue(
            torch.equal(
                torch.isin(random_ids, random_test_integer), isin_mps_friendly(random_ids, random_test_integer)
            )
        )

        # We can match against a 0D tensor
        random_test_tensor = torch.randint(0, 100, (1,)).squeeze()
        self.assertTrue(
            torch.equal(torch.isin(random_ids, random_test_tensor), isin_mps_friendly(random_ids, random_test_tensor))
        )

        # We can match against a 1D tensor (with many items)
        random_test_tensor = torch.randint(0, 100, (10,))
        self.assertTrue(
            torch.equal(torch.isin(random_ids, random_test_tensor), isin_mps_friendly(random_ids, random_test_tensor))
        )

    def test_can_generate(self):
        """Tests the behavior of `PreTrainedModel.can_generate` method."""
        logger = logging.get_logger("transformers.modeling_utils")
        logger.warning_once.cache_clear()

        # 1 - By default, a model CAN'T generate
        can_generate = BertModel.can_generate()
        self.assertFalse(can_generate)

        # 2 - The most common case for a model to be able to generate is to inherit from `GenerationMixin` directly
        class DummyBertWithMixin(BertModel, GenerationMixin):
            pass

        with CaptureLogger(logger) as cl:
            can_generate = DummyBertWithMixin.can_generate()
        self.assertTrue("" == cl.out)
        self.assertTrue(can_generate)

        # 3 - Alternatively, a model can implement a `generate` method
        class DummyBertWithGenerate(BertModel):
            def generate(self):
                pass

        with CaptureLogger(logger) as cl:
            can_generate = DummyBertWithGenerate.can_generate()
        self.assertTrue("" == cl.out)
        self.assertTrue(can_generate)

        # 4 - Finally, it can inherit from a model that can generate
        class DummyBertWithParent(DummyBertWithMixin):
            pass

        with CaptureLogger(logger) as cl:
            can_generate = DummyBertWithParent.can_generate()
        self.assertTrue("" == cl.out)
        self.assertTrue(can_generate)

        # 5 - BC: models with a custom `prepare_inputs_for_generation` can generate (it was assumed they inherited
        # `GenerationMixin`)
        class DummyBertWithPrepareInputs(BertModel):
            def prepare_inputs_for_generation(self):
                pass

        with CaptureLogger(logger) as cl:
            can_generate = DummyBertWithPrepareInputs.can_generate()
        self.assertTrue("it doesn't directly inherit from `GenerationMixin`" in cl.out)
        self.assertTrue(can_generate)

    def test_save_and_load_config_with_custom_generation(self):
        """
        Regression test for the ability to save and load a config with a custom generation kwarg (i.e.
a parameter that gets moved to the generation config and reset on the model config) """ model = T5ForConditionalGeneration.from_pretrained(TINY_T5) # The default for `num_beams` is 1 and `early_stopping` is False self.assertTrue(model.config.num_beams == 1) self.assertTrue(model.config.early_stopping is False) # When we save the model, this custom parameter should be moved to the generation config AND the model # config should contain `None` model.config.num_beams = 2 model.config.early_stopping = True self.assertTrue(model.generation_config.num_beams == 1) # unmodified generation config with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir) new_model = T5ForConditionalGeneration.from_pretrained(tmp_dir) # moved to generation config self.assertTrue(new_model.generation_config.num_beams == 2) self.assertTrue(new_model.generation_config.early_stopping is True) # reset in the model config self.assertTrue(new_model.config.num_beams is None) self.assertTrue(new_model.config.early_stopping is None) # Sanity check: We can run `generate` with the new model without any warnings random_ids = torch.randint(0, 100, (1, 5)) with warnings.catch_warnings(record=True) as w: new_model.generate(random_ids, max_new_tokens=3) self.assertTrue(len(w) == 0) def test_load_model_with_state_dict_only(self): model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") state_dict = model.state_dict() config = model.config model_loaded = BertModel.from_pretrained( pretrained_model_name_or_path=None, config=config, state_dict=state_dict ) self.assertTrue(check_models_equal(model, model_loaded)) def test_load_model_with_state_dict_only_low_cpu_mem_usage(self): model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") state_dict = model.state_dict() config = model.config model_loaded = BertModel.from_pretrained( pretrained_model_name_or_path=None, config=config, state_dict=state_dict, low_cpu_mem_usage=True ) self.assertTrue(check_models_equal(model, model_loaded)) def test_cache_when_needed_at_train_time(self): """ Some fine-tuning methods require the use of cache, like prefix tuning in PEFT. This test checks that a cache is at train time used if we request it. 
Related issue: #35648 """ model = AutoModelForCausalLM.from_pretrained(TINY_MISTRAL) tokenizer = AutoTokenizer.from_pretrained(TINY_MISTRAL) model_inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") # By default it is not training, we have to set it self.assertFalse(model.training) model.train() # If we set `use_cache=True` while training, then a cache is returned model_outputs = model(**model_inputs, use_cache=True) self.assertIsInstance(model_outputs.past_key_values, DynamicCache) self.assertTrue(model.training) # simulate injecting virtual tokens like in prefix tuning num_virtual_tokens = 3 past_key_values = [torch.randn(2, 1, 2, num_virtual_tokens, 8)] * 2 past_key_values = DynamicCache.from_legacy_cache(past_key_values) model_inputs["attention_mask"] = torch.cat( ( model_inputs["attention_mask"], torch.ones(1, num_virtual_tokens).to(model_inputs["attention_mask"].device), ), dim=1, ) model_outputs = model(**model_inputs, past_key_values=past_key_values, use_cache=True) self.assertTrue(model.training) # We can also disable the cache to skip a few operations, if the training loop doesn't need cache model_outputs = model(**model_inputs, use_cache=False) self.assertIsNone(model_outputs.past_key_values) self.assertTrue(model.training) def test_restore_default_torch_dtype_from_pretrained(self): """ Tests that the default torch dtype is restored when an error happens during the loading of a model. """ old_dtype = torch.get_default_dtype() # set default type to float32 torch.set_default_dtype(torch.float32) # Mock injection point which is right after the call to `_set_default_torch_dtype` original_set_default_torch_dtype = MistralForCausalLM._set_default_torch_dtype def debug(*args, **kwargs): # call the method as usual, than raise a RuntimeError original_set_default_torch_dtype(*args, **kwargs) raise RuntimeError with mock.patch( "transformers.models.mistral.modeling_mistral.MistralForCausalLM._set_default_torch_dtype", side_effect=debug, ): with self.assertRaises(RuntimeError): _ = AutoModelForCausalLM.from_pretrained(TINY_MISTRAL, device_map="auto", torch_dtype=torch.float16) # default should still be float32 assert torch.get_default_dtype() == torch.float32 torch.set_default_dtype(old_dtype) def test_restore_default_torch_dtype_from_config(self): """ Tests that the default torch dtype is restored when an error happens during the loading of a model. 
""" old_dtype = torch.get_default_dtype() # set default type to float32 torch.set_default_dtype(torch.float32) config = AutoConfig.from_pretrained( TINY_MISTRAL, ) # Mock injection point which is right after the call to `_set_default_torch_dtype` original_set_default_torch_dtype = MistralForCausalLM._set_default_torch_dtype def debug(*args, **kwargs): # call the method as usual, than raise a RuntimeError original_set_default_torch_dtype(*args, **kwargs) raise RuntimeError with mock.patch( "transformers.models.mistral.modeling_mistral.MistralForCausalLM._set_default_torch_dtype", side_effect=debug, ): with self.assertRaises(RuntimeError): config.torch_dtype = torch.float16 _ = AutoModelForCausalLM.from_config( config, ) # default should still be float32 assert torch.get_default_dtype() == torch.float32 torch.set_default_dtype(old_dtype) def test_unknown_quantization_config(self): with tempfile.TemporaryDirectory() as tmpdir: config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) model = BertModel(config) config.quantization_config = {"quant_method": "unknown"} model.save_pretrained(tmpdir) with self.assertLogs("transformers", level="WARNING") as cm: BertModel.from_pretrained(tmpdir) self.assertEqual(len(cm.records), 1) self.assertTrue(cm.records[0].message.startswith("Unknown quantization type, got")) @slow @require_torch class ModelOnTheFlyConversionTester(unittest.TestCase): @classmethod def setUpClass(cls): cls.user = "huggingface-hub-ci" cls.token = os.getenv("HUGGINGFACE_PRODUCTION_USER_TOKEN", None) if cls.token is None: raise ValueError("Cannot run tests as secret isn't setup.") cls.api = HfApi(token=cls.token) def setUp(self) -> None: self.repo_name = f"{self.user}/test-model-on-the-fly-{uuid.uuid4()}" def tearDown(self) -> None: self.api.delete_repo(self.repo_name) def test_safetensors_on_the_fly_conversion(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) initial_model = BertModel(config) initial_model.push_to_hub(self.repo_name, token=self.token, safe_serialization=False) converted_model = BertModel.from_pretrained(self.repo_name, use_safetensors=True) with self.subTest("Initial and converted models are equal"): for p1, p2 in zip(initial_model.parameters(), converted_model.parameters()): self.assertTrue(torch.equal(p1, p2)) with self.subTest("PR was open with the safetensors account"): discussions = self.api.get_repo_discussions(self.repo_name) discussion = next(discussions) self.assertEqual(discussion.author, "SFconvertbot") self.assertEqual(discussion.title, "Adding `safetensors` variant of this model") def test_safetensors_on_the_fly_conversion_private(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) initial_model = BertModel(config) initial_model.push_to_hub(self.repo_name, token=self.token, safe_serialization=False, private=True) converted_model = BertModel.from_pretrained(self.repo_name, use_safetensors=True, token=self.token) with self.subTest("Initial and converted models are equal"): for p1, p2 in zip(initial_model.parameters(), converted_model.parameters()): self.assertTrue(torch.equal(p1, p2)) with self.subTest("PR was open with the safetensors account"): discussions = self.api.get_repo_discussions(self.repo_name, token=self.token) discussion = next(discussions) self.assertEqual(discussion.author, self.user) self.assertEqual(discussion.title, "Adding 
`safetensors` variant of this model") def test_safetensors_on_the_fly_conversion_gated(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) initial_model = BertModel(config) initial_model.push_to_hub(self.repo_name, token=self.token, safe_serialization=False) headers = {"Authorization": f"Bearer {self.token}"} requests.put( f"https://huggingface.co/api/models/{self.repo_name}/settings", json={"gated": "auto"}, headers=headers ) converted_model = BertModel.from_pretrained(self.repo_name, use_safetensors=True, token=self.token) with self.subTest("Initial and converted models are equal"): for p1, p2 in zip(initial_model.parameters(), converted_model.parameters()): self.assertTrue(torch.equal(p1, p2)) with self.subTest("PR was open with the safetensors account"): discussions = self.api.get_repo_discussions(self.repo_name) discussion = next(discussions) self.assertEqual(discussion.author, "SFconvertbot") self.assertEqual(discussion.title, "Adding `safetensors` variant of this model") def test_safetensors_on_the_fly_sharded_conversion(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) initial_model = BertModel(config) initial_model.push_to_hub(self.repo_name, token=self.token, safe_serialization=False, max_shard_size="200kb") converted_model = BertModel.from_pretrained(self.repo_name, use_safetensors=True) with self.subTest("Initial and converted models are equal"): for p1, p2 in zip(initial_model.parameters(), converted_model.parameters()): self.assertTrue(torch.equal(p1, p2)) with self.subTest("PR was open with the safetensors account"): discussions = self.api.get_repo_discussions(self.repo_name) discussion = next(discussions) self.assertEqual(discussion.author, "SFconvertbot") self.assertEqual(discussion.title, "Adding `safetensors` variant of this model") def test_safetensors_on_the_fly_sharded_conversion_private(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) initial_model = BertModel(config) initial_model.push_to_hub( self.repo_name, token=self.token, safe_serialization=False, max_shard_size="200kb", private=True ) converted_model = BertModel.from_pretrained(self.repo_name, use_safetensors=True, token=self.token) with self.subTest("Initial and converted models are equal"): for p1, p2 in zip(initial_model.parameters(), converted_model.parameters()): self.assertTrue(torch.equal(p1, p2)) with self.subTest("PR was open with the safetensors account"): discussions = self.api.get_repo_discussions(self.repo_name) discussion = next(discussions) self.assertEqual(discussion.author, self.user) self.assertEqual(discussion.title, "Adding `safetensors` variant of this model") def test_safetensors_on_the_fly_sharded_conversion_gated(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) initial_model = BertModel(config) initial_model.push_to_hub(self.repo_name, token=self.token, max_shard_size="200kb", safe_serialization=False) headers = {"Authorization": f"Bearer {self.token}"} requests.put( f"https://huggingface.co/api/models/{self.repo_name}/settings", json={"gated": "auto"}, headers=headers ) converted_model = BertModel.from_pretrained(self.repo_name, use_safetensors=True, token=self.token) with self.subTest("Initial and converted models are equal"): for p1, p2 in zip(initial_model.parameters(), 
converted_model.parameters()): self.assertTrue(torch.equal(p1, p2)) with self.subTest("PR was open with the safetensors account"): discussions = self.api.get_repo_discussions(self.repo_name) discussion = next(discussions) self.assertEqual(discussion.author, "SFconvertbot") self.assertEqual(discussion.title, "Adding `safetensors` variant of this model") @unittest.skip(reason="Edge case, should work once the Space is updated`") def test_safetensors_on_the_fly_wrong_user_opened_pr(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) initial_model = BertModel(config) initial_model.push_to_hub(self.repo_name, token=self.token, safe_serialization=False, private=True) BertModel.from_pretrained(self.repo_name, use_safetensors=True, token=self.token) # This should have opened a PR with the user's account with self.subTest("PR was open with the safetensors account"): discussions = self.api.get_repo_discussions(self.repo_name) discussion = next(discussions) self.assertEqual(discussion.author, self.user) self.assertEqual(discussion.title, "Adding `safetensors` variant of this model") # We now switch the repo visibility to public self.api.update_repo_visibility(self.repo_name, private=False) # We once again call from_pretrained, which should call the bot to open a PR BertModel.from_pretrained(self.repo_name, use_safetensors=True, token=self.token) with self.subTest("PR was open with the safetensors account"): discussions = self.api.get_repo_discussions(self.repo_name) bot_opened_pr = None bot_opened_pr_title = None for discussion in discussions: if discussion.author == "SFconvertbot": bot_opened_pr = True bot_opened_pr_title = discussion.title self.assertTrue(bot_opened_pr) self.assertEqual(bot_opened_pr_title, "Adding `safetensors` variant of this model") def test_safetensors_on_the_fly_specific_revision(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) initial_model = BertModel(config) # Push a model on `main` initial_model.push_to_hub(self.repo_name, token=self.token, safe_serialization=False) # Push a model on a given revision initial_model.push_to_hub(self.repo_name, token=self.token, safe_serialization=False, revision="new-branch") # Try to convert the model on that revision should raise with self.assertRaises(EnvironmentError): BertModel.from_pretrained(self.repo_name, use_safetensors=True, token=self.token, revision="new-branch") def test_absence_of_safetensors_triggers_conversion(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) initial_model = BertModel(config) # Push a model on `main` initial_model.push_to_hub(self.repo_name, token=self.token, safe_serialization=False) # Download the model that doesn't have safetensors BertModel.from_pretrained(self.repo_name, token=self.token) for thread in threading.enumerate(): if thread.name == "Thread-autoconversion": thread.join(timeout=10) discussions = self.api.get_repo_discussions(self.repo_name) bot_opened_pr = None bot_opened_pr_title = None for discussion in discussions: if discussion.author == "SFconvertbot": bot_opened_pr = True bot_opened_pr_title = discussion.title self.assertTrue(bot_opened_pr) self.assertEqual(bot_opened_pr_title, "Adding `safetensors` variant of this model") @mock.patch("transformers.safetensors_conversion.spawn_conversion") def test_absence_of_safetensors_triggers_conversion_failed(self, 
spawn_conversion_mock):
        spawn_conversion_mock.side_effect = HTTPError()

        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        initial_model = BertModel(config)

        # Push a model on `main`
        initial_model.push_to_hub(self.repo_name, token=self.token, safe_serialization=False)

        # The auto conversion is mocked to always raise; ensure that it doesn't raise in the main thread
        BertModel.from_pretrained(self.repo_name, token=self.token)


@require_torch
@is_staging_test
class ModelPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @unittest.skip(reason="This test is flaky")
    def test_push_to_hub(self):
        with TemporaryHubRepo(token=self._token) as tmp_repo:
            config = BertConfig(
                vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
            )
            model = BertModel(config)
            model.push_to_hub(tmp_repo.repo_id, token=self._token)

            new_model = BertModel.from_pretrained(tmp_repo.repo_id)
            for p1, p2 in zip(model.parameters(), new_model.parameters()):
                self.assertTrue(torch.equal(p1, p2))

    @unittest.skip(reason="This test is flaky")
    def test_push_to_hub_via_save_pretrained(self):
        with TemporaryHubRepo(token=self._token) as tmp_repo:
            config = BertConfig(
                vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
            )
            model = BertModel(config)
            # Push to hub via save_pretrained
            with tempfile.TemporaryDirectory() as tmp_dir:
                model.save_pretrained(tmp_dir, repo_id=tmp_repo.repo_id, push_to_hub=True, token=self._token)

            new_model = BertModel.from_pretrained(tmp_repo.repo_id)
            for p1, p2 in zip(model.parameters(), new_model.parameters()):
                self.assertTrue(torch.equal(p1, p2))

    def test_push_to_hub_with_description(self):
        with TemporaryHubRepo(token=self._token) as tmp_repo:
            config = BertConfig(
                vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
            )
            model = BertModel(config)
            COMMIT_DESCRIPTION = """
The commit description supports markdown syntax, see:
```python
>>> from transformers import AutoConfig
>>> config = AutoConfig.from_pretrained("google-bert/bert-base-uncased")
```
"""
            commit_details = model.push_to_hub(
                tmp_repo.repo_id, use_auth_token=self._token, create_pr=True, commit_description=COMMIT_DESCRIPTION
            )
            self.assertEqual(commit_details.commit_description, COMMIT_DESCRIPTION)

    @unittest.skip(reason="This test is flaky")
    def test_push_to_hub_in_organization(self):
        with TemporaryHubRepo(namespace="valid_org", token=self._token) as tmp_repo:
            config = BertConfig(
                vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
            )
            model = BertModel(config)
            model.push_to_hub(tmp_repo.repo_id, token=self._token)

            new_model = BertModel.from_pretrained(tmp_repo.repo_id)
            for p1, p2 in zip(model.parameters(), new_model.parameters()):
                self.assertTrue(torch.equal(p1, p2))

    @unittest.skip(reason="This test is flaky")
    def test_push_to_hub_in_organization_via_save_pretrained(self):
        with TemporaryHubRepo(namespace="valid_org", token=self._token) as tmp_repo:
            config = BertConfig(
                vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
            )
            model = BertModel(config)
            # Push to hub via save_pretrained
            with tempfile.TemporaryDirectory() as tmp_dir:
                model.save_pretrained(tmp_dir, push_to_hub=True, token=self._token, repo_id=tmp_repo.repo_id)

            new_model = BertModel.from_pretrained(tmp_repo.repo_id)
            for p1, p2 in zip(model.parameters(),
new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) def test_push_to_hub_dynamic_model(self): with TemporaryHubRepo(token=self._token) as tmp_repo: CustomConfig.register_for_auto_class() CustomModel.register_for_auto_class() config = CustomConfig(hidden_size=32) model = CustomModel(config) model.push_to_hub(tmp_repo.repo_id, token=self._token) # checks self.assertDictEqual( config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig", "AutoModel": "custom_modeling.CustomModel"}, ) new_model = AutoModel.from_pretrained(tmp_repo.repo_id, trust_remote_code=True) # Can't make an isinstance check because the new_model is from the CustomModel class of a dynamic module self.assertEqual(new_model.__class__.__name__, "CustomModel") for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) config = AutoConfig.from_pretrained(tmp_repo.repo_id, trust_remote_code=True) new_model = AutoModel.from_config(config, trust_remote_code=True) self.assertEqual(new_model.__class__.__name__, "CustomModel") def test_push_to_hub_with_tags(self): with TemporaryHubRepo(token=self._token) as tmp_repo: from huggingface_hub import ModelCard new_tags = ["tag-1", "tag-2"] CustomConfig.register_for_auto_class() CustomModel.register_for_auto_class() config = CustomConfig(hidden_size=32) model = CustomModel(config) self.assertTrue(model.model_tags is None) model.add_model_tags(new_tags) self.assertTrue(model.model_tags == new_tags) model.push_to_hub(tmp_repo.repo_id, token=self._token) loaded_model_card = ModelCard.load(tmp_repo.repo_id) self.assertEqual(loaded_model_card.data.tags, new_tags) @require_torch class AttentionMaskTester(unittest.TestCase): def check_non_causal(self, bsz, q_len, kv_len, mask_2d, mask_4d): mask_indices = (mask_2d != 1)[:, None].broadcast_to((bsz, q_len, kv_len)) mask_4d_values = mask_4d[:, 0][mask_indices] is_inf = mask_4d_values == -float("inf") is_min = mask_4d_values == torch.finfo(mask_4d.dtype).min assert torch.logical_or(is_inf, is_min).all() def check_to_4d(self, mask_converter, q_len, kv_len, additional_mask=None, bsz=3): mask_2d = torch.ones((bsz, kv_len), device=torch_device, dtype=torch.long) if additional_mask is not None: for bsz_idx, seq_idx in additional_mask: mask_2d[bsz_idx, seq_idx] = 0 mask_4d = mask_converter.to_4d(mask_2d, query_length=q_len, key_value_length=kv_len, dtype=torch.float32) assert mask_4d.shape == (bsz, 1, q_len, kv_len) # make sure there are no overflows assert mask_4d.min() != float("-inf") context = mask_converter.sliding_window if mask_converter.is_causal and context is None: # k * (k+1) / 2 tokens are masked in triangualar masks num_tokens_masked = bsz * (q_len * (q_len - 1) // 2) if 0 not in mask_2d: assert (mask_4d != 0).sum().cpu().item() == num_tokens_masked if 0 in mask_2d: # at least causal mask + maybe more assert (mask_4d != 0).sum().cpu().item() >= num_tokens_masked self.check_non_causal(bsz, q_len, kv_len, mask_2d, mask_4d) elif not mask_converter.is_causal and context is None: if 0 not in mask_2d: assert (mask_4d != 0).sum().cpu().item() == 0 if 0 in mask_2d: self.check_non_causal(bsz, q_len, kv_len, mask_2d, mask_4d) elif mask_converter.is_causal and context is not None: # k * (k+1) / 2 tokens are masked in triangualar masks num_tokens_masked = (q_len * (q_len - 1) // 2) + self.compute_num_context_mask(kv_len, context, q_len) num_tokens_masked = bsz * num_tokens_masked if 0 not in mask_2d: assert (mask_4d != 0).sum().cpu().item() == num_tokens_masked if 0 in mask_2d: # at least causal mask 
+ maybe more assert (mask_4d != 0).sum().cpu().item() >= num_tokens_masked self.check_non_causal(bsz, q_len, kv_len, mask_2d, mask_4d) def check_to_causal(self, mask_converter, q_len, kv_len, bsz=3): mask_4d = mask_converter.to_causal_4d( bsz, query_length=q_len, key_value_length=kv_len, device=torch_device, dtype=torch.float32 ) if q_len == 1 and mask_converter.sliding_window is None: # no causal mask if q_len is 1 assert mask_4d is None return context = mask_converter.sliding_window if mask_converter.is_causal and context is None: # k * (k+1) / 2 tokens are masked in triangualar masks num_tokens_masked = bsz * (q_len * (q_len - 1) // 2) assert (mask_4d != 0).sum().cpu().item() == num_tokens_masked elif not mask_converter.is_causal and context is None: assert (mask_4d != 0).sum().cpu().item() == 0 elif mask_converter.is_causal and context is not None: # k * (k+1) / 2 tokens are masked in triangualar masks num_tokens_masked = (q_len * (q_len - 1) // 2) + self.compute_num_context_mask(kv_len, context, q_len) num_tokens_masked = bsz * num_tokens_masked assert (mask_4d != 0).sum().cpu().item() == num_tokens_masked def compute_num_context_mask(self, kv_len, context, q_len): # This function computes the # of attention tokens that are added for # the sliding window c_mask_len = kv_len - context - 1 num_mask_triangle = c_mask_len * (c_mask_len + 1) // 2 cut_mask_len = max(c_mask_len - q_len, 0) num_cut_mask = cut_mask_len * (cut_mask_len + 1) // 2 return num_mask_triangle - num_cut_mask def test_2d_to_4d_causal(self): mask_converter = AttentionMaskConverter(is_causal=True) # auto-regressive use case self.check_to_4d(mask_converter, q_len=1, kv_len=7) # special auto-regressive case self.check_to_4d(mask_converter, q_len=3, kv_len=7) # non auto-regressive case self.check_to_4d(mask_converter, q_len=7, kv_len=7) # same with extra attention masks self.check_to_4d(mask_converter, q_len=1, kv_len=7, additional_mask=[(0, 2), (1, 3), (2, 0)]) self.check_to_4d(mask_converter, q_len=3, kv_len=7, additional_mask=[(0, 2), (1, 3), (2, 0)]) self.check_to_4d(mask_converter, q_len=7, kv_len=7, additional_mask=[(0, 2), (1, 3), (2, 0)]) # check that the mask does not overflow on causal masked tokens self.check_to_4d(mask_converter, q_len=7, kv_len=7, additional_mask=[(0, 0), (1, 0), (1, 1)]) def test_2d_to_4d(self): mask_converter = AttentionMaskConverter(is_causal=False) # non auto-regressive case self.check_to_4d(mask_converter, q_len=7, kv_len=7) # same with extra attention masks self.check_to_4d(mask_converter, q_len=7, kv_len=7, additional_mask=[(0, 2), (1, 3), (2, 0)]) def test_2d_to_4d_causal_sliding(self): mask_converter = AttentionMaskConverter(is_causal=True, sliding_window=5) # auto-regressive use case self.check_to_4d(mask_converter, q_len=1, kv_len=7) # special auto-regressive case self.check_to_4d(mask_converter, q_len=3, kv_len=7) # non auto-regressive case self.check_to_4d(mask_converter, q_len=7, kv_len=7) # same with extra attention masks self.check_to_4d(mask_converter, q_len=1, kv_len=7, additional_mask=[(0, 2), (1, 3), (2, 0)]) self.check_to_4d(mask_converter, q_len=3, kv_len=7, additional_mask=[(0, 2), (1, 3), (2, 0)]) self.check_to_4d(mask_converter, q_len=7, kv_len=7, additional_mask=[(0, 2), (1, 3), (2, 0)]) def test_causal_mask(self): mask_converter = AttentionMaskConverter(is_causal=True) # auto-regressive use case self.check_to_causal(mask_converter, q_len=1, kv_len=7) # special auto-regressive case self.check_to_causal(mask_converter, q_len=3, kv_len=7) # non auto-regressive case 
self.check_to_causal(mask_converter, q_len=7, kv_len=7) def test_causal_mask_sliding(self): mask_converter = AttentionMaskConverter(is_causal=True, sliding_window=3) # auto-regressive use case self.check_to_causal(mask_converter, q_len=1, kv_len=7) # special auto-regressive case self.check_to_causal(mask_converter, q_len=3, kv_len=7) # non auto-regressive case self.check_to_causal(mask_converter, q_len=7, kv_len=7) def test_torch_compile_fullgraph(self): model = Prepare4dCausalAttentionMaskModel() inputs_embeds = torch.rand([1, 3, 32]) res_non_compiled = model(inputs_embeds) compiled_model = torch.compile(model, fullgraph=True) res_compiled = compiled_model(inputs_embeds) self.assertTrue(torch.equal(res_non_compiled, res_compiled)) model = Create4dCausalAttentionMaskModel() inputs_embeds = torch.rand(2, 4, 16) res_non_compiled = model(inputs_embeds) compiled_model = torch.compile(model, fullgraph=True) res_compiled = compiled_model(inputs_embeds) self.assertTrue(torch.equal(res_non_compiled, res_compiled)) model = Prepare4dAttentionMaskModel() mask = torch.ones(2, 4) mask[0, :2] = 0 inputs_embeds = torch.rand(2, 4, 16) res_non_compiled = model(mask, inputs_embeds) compiled_model = torch.compile(model, fullgraph=True) res_compiled = compiled_model(mask, inputs_embeds) self.assertTrue(torch.equal(res_non_compiled, res_compiled)) @require_torch @slow def test_unmask_unattended_left_padding(self): attention_mask = torch.Tensor([[0, 0, 1], [1, 1, 1], [0, 1, 1]]).to(torch.int64) expanded_mask = torch.Tensor( [ [[[0, 0, 0], [0, 0, 0], [0, 0, 1]]], [[[1, 0, 0], [1, 1, 0], [1, 1, 1]]], [[[0, 0, 0], [0, 1, 0], [0, 1, 1]]], ] ).to(torch.int64) reference_output = torch.Tensor( [ [[[1, 1, 1], [1, 1, 1], [0, 0, 1]]], [[[1, 0, 0], [1, 1, 0], [1, 1, 1]]], [[[1, 1, 1], [0, 1, 0], [0, 1, 1]]], ] ).to(torch.int64) result = AttentionMaskConverter._unmask_unattended(expanded_mask, attention_mask, unmasked_value=1) self.assertTrue(torch.equal(result, reference_output)) attention_mask = torch.Tensor([[0, 0, 1, 1, 1], [1, 1, 1, 1, 1], [0, 1, 1, 1, 1]]).to(torch.int64) attn_mask_converter = AttentionMaskConverter(is_causal=True) past_key_values_length = 0 key_value_length = attention_mask.shape[-1] + past_key_values_length expanded_mask = attn_mask_converter.to_4d( attention_mask, attention_mask.shape[-1], key_value_length=key_value_length, dtype=torch.float32 ) result = AttentionMaskConverter._unmask_unattended(expanded_mask, attention_mask, unmasked_value=0) min_inf = torch.finfo(torch.float32).min reference_output = torch.Tensor( [ [ [ [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [min_inf, min_inf, 0, min_inf, min_inf], [min_inf, min_inf, 0, 0, min_inf], [min_inf, min_inf, 0, 0, 0], ] ], [ [ [0, min_inf, min_inf, min_inf, min_inf], [0, 0, min_inf, min_inf, min_inf], [0, 0, 0, min_inf, min_inf], [0, 0, 0, 0, min_inf], [0, 0, 0, 0, 0], ] ], [ [ [0, 0, 0, 0, 0], [min_inf, 0, min_inf, min_inf, min_inf], [min_inf, 0, 0, min_inf, min_inf], [min_inf, 0, 0, 0, min_inf], [min_inf, 0, 0, 0, 0], ] ], ] ) self.assertTrue(torch.equal(reference_output, result)) @require_torch @slow def test_unmask_unattended_right_padding(self): attention_mask = torch.Tensor([[1, 1, 1, 0], [1, 1, 1, 1], [1, 1, 0, 0]]).to(torch.int64) attn_mask_converter = AttentionMaskConverter(is_causal=True) past_key_values_length = 0 key_value_length = attention_mask.shape[-1] + past_key_values_length expanded_mask = attn_mask_converter.to_4d( attention_mask, attention_mask.shape[-1], key_value_length=key_value_length, dtype=torch.float32 ) result = 
AttentionMaskConverter._unmask_unattended(expanded_mask, attention_mask, unmasked_value=0) self.assertTrue(torch.equal(expanded_mask, result)) @require_torch @slow def test_unmask_unattended_random_mask(self): attention_mask = torch.Tensor([[1, 0, 1, 0], [1, 0, 1, 1], [1, 1, 0, 1]]).to(torch.int64) attn_mask_converter = AttentionMaskConverter(is_causal=True) past_key_values_length = 0 key_value_length = attention_mask.shape[-1] + past_key_values_length expanded_mask = attn_mask_converter.to_4d( attention_mask, attention_mask.shape[-1], key_value_length=key_value_length, dtype=torch.float32 ) result = AttentionMaskConverter._unmask_unattended(expanded_mask, attention_mask, unmasked_value=0) self.assertTrue(torch.equal(expanded_mask, result)) @require_torch class TestAttentionImplementation(unittest.TestCase): def test_error_no_sdpa_available(self): with self.assertRaises(ValueError) as cm: _ = AutoModel.from_pretrained("hf-tiny-model-private/tiny-random-MCTCTModel", attn_implementation="sdpa") self.assertTrue( "does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention" in str(cm.exception) ) _ = AutoModel.from_pretrained("hf-tiny-model-private/tiny-random-MCTCTModel") def test_error_no_flash_available(self): with self.assertRaises(ValueError) as cm: _ = AutoModel.from_pretrained( "hf-tiny-model-private/tiny-random-MCTCTModel", attn_implementation="flash_attention_2" ) self.assertTrue("does not support Flash Attention 2.0" in str(cm.exception)) def test_error_no_flash_available_with_config(self): with self.assertRaises(ValueError) as cm: config = AutoConfig.from_pretrained("hf-tiny-model-private/tiny-random-MCTCTModel") _ = AutoModel.from_pretrained( "hf-tiny-model-private/tiny-random-MCTCTModel", config=config, attn_implementation="flash_attention_2" ) self.assertTrue("does not support Flash Attention 2.0" in str(cm.exception)) def test_error_wrong_attn_implementation(self): with self.assertRaises(ValueError) as cm: _ = AutoModel.from_pretrained("hf-tiny-model-private/tiny-random-MCTCTModel", attn_implementation="foo") self.assertTrue('The only possible arguments are `attn_implementation="eager"' in str(cm.exception)) def test_not_available_flash(self): if is_flash_attn_2_available(): self.skipTest(reason="Please uninstall flash-attn package to run test_not_available_flash") with self.assertRaises(ImportError) as cm: _ = AutoModel.from_pretrained( "hf-internal-testing/tiny-random-GPTBigCodeModel", attn_implementation="flash_attention_2" ) self.assertTrue("the package flash_attn seems to be not installed" in str(cm.exception)) def test_not_available_flash_with_config(self): if is_flash_attn_2_available(): self.skipTest(reason="Please uninstall flash-attn package to run test_not_available_flash") config = AutoConfig.from_pretrained("hf-internal-testing/tiny-random-GPTBigCodeModel") with self.assertRaises(ImportError) as cm: _ = AutoModel.from_pretrained( "hf-internal-testing/tiny-random-GPTBigCodeModel", config=config, attn_implementation="flash_attention_2", ) self.assertTrue("the package flash_attn seems to be not installed" in str(cm.exception)) def test_not_available_sdpa(self): if is_torch_sdpa_available(): self.skipTest(reason="This test requires torch<=2.0") with self.assertRaises(ImportError) as cm: _ = AutoModel.from_pretrained( "hf-internal-testing/tiny-random-GPTBigCodeModel", attn_implementation="sdpa" ) self.assertTrue("PyTorch SDPA requirements in Transformers are not met" in str(cm.exception)) @require_torch class 
TestTensorSharing(TestCasePlus):
    def test_disjoint(self):
        main = torch.zeros(10)
        a = main[:5]
        b = main[5:]
        state_dict = {"a": a, "b": b}

        shared_names, disjoint_names = _find_disjoint([{"a", "b"}], state_dict)
        self.assertEqual(shared_names, [])
        self.assertEqual(disjoint_names, ["a", "b"])

        a = main[::2]
        b = main[1::2]
        state_dict = {"a": a, "b": b}

        shared_names, disjoint_names = _find_disjoint([{"a", "b"}], state_dict)
        self.assertEqual(shared_names, [{"a", "b"}])
        self.assertEqual(disjoint_names, [])

    def test_identical(self):
        a = torch.zeros(10)
        b = a
        state_dict = {"a": a, "b": b}

        shared_names, identical_names = _find_identical([{"a", "b"}], state_dict)
        self.assertEqual(shared_names, [])
        self.assertEqual(identical_names, [{"a", "b"}])

        b = a[:5]
        state_dict = {"a": a, "b": b}

        shared_names, identical_names = _find_identical([{"a", "b"}], state_dict)
        self.assertEqual(shared_names, [{"a", "b"}])
        self.assertEqual(identical_names, [])
transformers/tests/utils/test_modeling_utils.py/0
{ "file_path": "transformers/tests/utils/test_modeling_utils.py", "repo_id": "transformers", "token_count": 55659 }
# coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This script is responsible for making sure the dummies in utils/dummies_xxx.py are up to date with the main init. Why dummies? This is to make sure that a user can always import all objects from `transformers`, even if they don't have the necessary extra libs installed. Those objects will then raise helpful error message whenever the user tries to access one of their methods. Usage (from the root of the repo): Check that the dummy files are up to date (used in `make repo-consistency`): ```bash python utils/check_dummies.py ``` Update the dummy files if needed (used in `make fix-copies`): ```bash python utils/check_dummies.py --fix_and_overwrite ``` """ import argparse import os import re from typing import Dict, List, Optional # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_dummies.py PATH_TO_TRANSFORMERS = "src/transformers" # Matches is_xxx_available() _re_backend = re.compile(r"is\_([a-z_]*)_available()") # Matches from xxx import bla _re_single_line_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n") # Matches if not is_xxx_available() _re_test_backend = re.compile(r"^\s+if\s+not\s+\(?is\_[a-z_]*\_available\(\)") # Template for the dummy objects. DUMMY_CONSTANT = """ {0} = None """ DUMMY_CLASS = """ class {0}(metaclass=DummyObject): _backends = {1} def __init__(self, *args, **kwargs): requires_backends(self, {1}) """ DUMMY_FUNCTION = """ def {0}(*args, **kwargs): requires_backends({0}, {1}) """ def find_backend(line: str) -> Optional[str]: """ Find one (or multiple) backend in a code line of the init. Args: line (`str`): A code line in an init file. Returns: Optional[`str`]: If one (or several) backend is found, returns it. In the case of multiple backends (the line contains `if is_xxx_available() and `is_yyy_available()`) returns all backends joined on `_and_` (so `xxx_and_yyy` for instance). """ if _re_test_backend.search(line) is None: return None backends = [b[0] for b in _re_backend.findall(line)] backends.sort() return "_and_".join(backends) def read_init() -> Dict[str, List[str]]: """ Read the init and extract backend-specific objects. Returns: Dict[str, List[str]]: A dictionary mapping backend name to the list of object names requiring that backend. """ with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f: lines = f.readlines() # Get to the point we do the actual imports for type checking line_index = 0 while not lines[line_index].startswith("if TYPE_CHECKING"): line_index += 1 backend_specific_objects = {} # Go through the end of the file while line_index < len(lines): # If the line is an if is_backend_available, we grab all objects associated. 
backend = find_backend(lines[line_index]) if backend is not None: while not lines[line_index].startswith(" else:"): line_index += 1 line_index += 1 objects = [] # Until we unindent, add backend objects to the list while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8): line = lines[line_index] single_line_import_search = _re_single_line_import.search(line) if single_line_import_search is not None: # Single-line imports objects.extend(single_line_import_search.groups()[0].split(", ")) elif line.startswith(" " * 12): # Multiple-line imports (with 3 indent level) objects.append(line[12:-2]) line_index += 1 backend_specific_objects[backend] = objects else: line_index += 1 return backend_specific_objects def create_dummy_object(name: str, backend_name: str) -> str: """ Create the code for a dummy object. Args: name (`str`): The name of the object. backend_name (`str`): The name of the backend required for that object. Returns: `str`: The code of the dummy object. """ if name.isupper(): return DUMMY_CONSTANT.format(name) elif name.islower(): return DUMMY_FUNCTION.format(name, backend_name) else: return DUMMY_CLASS.format(name, backend_name) def create_dummy_files(backend_specific_objects: Optional[Dict[str, List[str]]] = None) -> Dict[str, str]: """ Create the content of the dummy files. Args: backend_specific_objects (`Dict[str, List[str]]`, *optional*): The mapping backend name to list of backend-specific objects. If not passed, will be obtained by calling `read_init()`. Returns: `Dict[str, str]`: A dictionary mapping backend name to code of the corresponding backend file. """ if backend_specific_objects is None: backend_specific_objects = read_init() dummy_files = {} for backend, objects in backend_specific_objects.items(): backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]" dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n" dummy_file += "from ..utils import DummyObject, requires_backends\n\n" dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects]) dummy_files[backend] = dummy_file return dummy_files def check_dummies(overwrite: bool = False): """ Check if the dummy files are up to date and maybe `overwrite` with the right content. Args: overwrite (`bool`, *optional*, default to `False`): Whether or not to overwrite the content of the dummy files. Will raise an error if they are not up to date when `overwrite=False`. """ dummy_files = create_dummy_files() # For special correspondence backend name to shortcut as used in utils/dummy_xxx_objects.py short_names = {"torch": "pt"} # Locate actual dummy modules and read their content. path = os.path.join(PATH_TO_TRANSFORMERS, "utils") dummy_file_paths = { backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py") for backend in dummy_files.keys() } actual_dummies = {} for backend, file_path in dummy_file_paths.items(): if os.path.isfile(file_path): with open(file_path, "r", encoding="utf-8", newline="\n") as f: actual_dummies[backend] = f.read() else: actual_dummies[backend] = "" # Compare actual with what they should be. for backend in dummy_files.keys(): if dummy_files[backend] != actual_dummies[backend]: if overwrite: print( f"Updating transformers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main " "__init__ has new objects." 
) with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f: f.write(dummy_files[backend]) else: raise ValueError( "The main __init__ has objects that are not present in " f"transformers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` " "to fix this." ) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.") args = parser.parse_args() check_dummies(args.fix_and_overwrite)
transformers/utils/check_dummies.py/0
{ "file_path": "transformers/utils/check_dummies.py", "repo_id": "transformers", "token_count": 3338 }
import argparse import math import traceback import dateutil.parser as date_parser import requests def extract_time_from_single_job(job): """Extract time info from a single job in a GitHub Actions workflow run""" job_info = {} start = job["started_at"] end = job["completed_at"] start_datetime = date_parser.parse(start) end_datetime = date_parser.parse(end) duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0) job_info["started_at"] = start job_info["completed_at"] = end job_info["duration"] = duration_in_min return job_info def get_job_time(workflow_run_id, token=None): """Extract time info for all jobs in a GitHub Actions workflow run""" headers = None if token is not None: headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"} url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100" result = requests.get(url, headers=headers).json() job_time = {} try: job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]}) pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100) for i in range(pages_to_iterate_over): result = requests.get(url + f"&page={i + 2}", headers=headers).json() job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]}) return job_time except Exception: print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}") return {} if __name__ == "__main__": r""" Example: python get_github_job_time.py --workflow_run_id 2945609517 """ parser = argparse.ArgumentParser() # Required parameters parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.") args = parser.parse_args() job_time = get_job_time(args.workflow_run_id) job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True)) for k, v in job_time.items(): print(f'{k}: {v["duration"]}')
transformers/utils/get_github_job_time.py/0
{ "file_path": "transformers/utils/get_github_job_time.py", "repo_id": "transformers", "token_count": 835 }
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import json import os import requests if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--workflow_id", type=str, required=True) args = parser.parse_args() workflow_id = args.workflow_id r = requests.get( f"https://circleci.com/api/v2/workflow/{workflow_id}/job", headers={"Circle-Token": os.environ.get("CIRCLE_TOKEN", "")}, ) jobs = r.json()["items"] os.makedirs("outputs", exist_ok=True) workflow_summary = {} # for each job, download artifacts for job in jobs: project_slug = job["project_slug"] if job["name"].startswith(("tests_", "examples_", "pipelines_")): url = f'https://circleci.com/api/v2/project/{project_slug}/{job["job_number"]}/artifacts' r = requests.get(url, headers={"Circle-Token": os.environ.get("CIRCLE_TOKEN", "")}) job_artifacts = r.json()["items"] os.makedirs(job["name"], exist_ok=True) os.makedirs(f'outputs/{job["name"]}', exist_ok=True) job_test_summaries = {} for artifact in job_artifacts: if artifact["path"].startswith("reports/") and artifact["path"].endswith("/summary_short.txt"): node_index = artifact["node_index"] url = artifact["url"] r = requests.get(url, headers={"Circle-Token": os.environ.get("CIRCLE_TOKEN", "")}) test_summary = r.text job_test_summaries[node_index] = test_summary summary = {} for node_index, node_test_summary in job_test_summaries.items(): for line in node_test_summary.splitlines(): if line.startswith("PASSED "): test = line[len("PASSED ") :] summary[test] = "passed" elif line.startswith("FAILED "): test = line[len("FAILED ") :].split()[0] summary[test] = "failed" # failed before passed summary = dict(sorted(summary.items(), key=lambda x: (x[1], x[0]))) workflow_summary[job["name"]] = summary # collected version with open(f'outputs/{job["name"]}/test_summary.json', "w") as fp: json.dump(summary, fp, indent=4) new_workflow_summary = {} for job_name, job_summary in workflow_summary.items(): for test, status in job_summary.items(): if test not in new_workflow_summary: new_workflow_summary[test] = {} new_workflow_summary[test][job_name] = status for test, result in new_workflow_summary.items(): new_workflow_summary[test] = dict(sorted(result.items())) new_workflow_summary = dict(sorted(new_workflow_summary.items())) with open("outputs/test_summary.json", "w") as fp: json.dump(new_workflow_summary, fp, indent=4)
transformers/utils/process_circleci_workflow_test_reports.py/0
{ "file_path": "transformers/utils/process_circleci_workflow_test_reports.py", "repo_id": "transformers", "token_count": 1573 }
cff-version: 1.2.0 title: 'TRL: Transformer Reinforcement Learning' message: >- If you use this software, please cite it using the metadata from this file. type: software authors: - given-names: Leandro family-names: von Werra - given-names: Younes family-names: Belkada - given-names: Lewis family-names: Tunstall - given-names: Edward family-names: Beeching - given-names: Tristan family-names: Thrush - given-names: Nathan family-names: Lambert - given-names: Shengyi family-names: Huang - given-names: Kashif family-names: Rasul - given-names: Quentin family-names: GallouΓ©dec repository-code: 'https://github.com/huggingface/trl' abstract: "With trl you can train transformer language models with Proximal Policy Optimization (PPO). The library is built on top of the transformers library by \U0001F917 Hugging Face. Therefore, pre-trained language models can be directly loaded via transformers. At this point, most decoder and encoder-decoder architectures are supported." keywords: - rlhf - deep-learning - pytorch - transformers license: Apache-2.0 version: 0.14
trl/CITATION.cff/0
{ "file_path": "trl/CITATION.cff", "repo_id": "trl", "token_count": 369 }
# Command Line Interfaces (CLIs)

You can use TRL to fine-tune your Language Model with Supervised Fine-Tuning (SFT) or Direct Preference Optimization (DPO), or even chat with your model using the TRL CLIs.

Currently supported CLIs are:

#### Training commands

- `trl dpo`: fine-tune an LLM with DPO
- `trl grpo`: fine-tune an LLM with GRPO
- `trl kto`: fine-tune an LLM with KTO
- `trl sft`: fine-tune an LLM with SFT

#### Other commands

- `trl chat`: quickly spin up an LLM fine-tuned for chatting
- `trl env`: get the system information

## Fine-tuning with the CLI

Before getting started, pick up a Language Model from the Hugging Face Hub. Supported models can be found with the "text-generation" filter on the models page. Also make sure to pick up a relevant dataset for your task.

Before using the `sft` or `dpo` commands, make sure to run:

```bash
accelerate config
```

and pick the right configuration for your training setup (single / multi-GPU, DeepSpeed, etc.). Make sure to complete all steps of `accelerate config` before running any CLI command.

We also recommend passing a YAML config file to configure your training protocol. Below is a simple example of a YAML file that you can use for training your models with the `trl sft` command.

```yaml
model_name_or_path: Qwen/Qwen2.5-0.5B
dataset_name: stanfordnlp/imdb
report_to: none
learning_rate: 0.0001
lr_scheduler_type: cosine
```

Save that config in a `.yaml` file and get started immediately! An example CLI config is available as `examples/cli_configs/example_config.yaml`. Note that you can override the arguments from the config file by explicitly passing them to the CLI, e.g. from the root folder:

```bash
trl sft --config examples/cli_configs/example_config.yaml --output_dir test-trl-cli --lr_scheduler_type cosine_with_restarts
```

This will force the use of `cosine_with_restarts` for `lr_scheduler_type`.

### Supported Arguments

We support all arguments from `transformers.TrainingArguments`. For loading your model, we support all arguments from `~trl.ModelConfig`:

[[autodoc]] ModelConfig

You can pass any of these arguments either to the CLI or the YAML file.

### Supervised Fine-tuning (SFT)

Follow the basic instructions above and run `trl sft --output_dir <output_dir> <*args>`:

```bash
trl sft --model_name_or_path facebook/opt-125m --dataset_name stanfordnlp/imdb --output_dir opt-sft-imdb
```

The SFT CLI is based on the `trl/scripts/sft.py` script.

### Direct Preference Optimization (DPO)

To use the DPO CLI, you need to have a dataset in the TRL format such as

* TRL's Anthropic HH dataset: https://huggingface.co/datasets/trl-internal-testing/hh-rlhf-helpful-base-trl-style
* TRL's OpenAI TL;DR summarization dataset: https://huggingface.co/datasets/trl-internal-testing/tldr-preference-trl-style

These datasets always have at least three columns, `prompt`, `chosen` and `rejected`:

* `prompt` is a list of strings.
* `chosen` is the chosen response in [chat format](https://huggingface.co/docs/transformers/main/en/chat_templating)
* `rejected` is the rejected response in [chat format](https://huggingface.co/docs/transformers/main/en/chat_templating)

For a quick start, you can run the following command:

```bash
trl dpo --model_name_or_path facebook/opt-125m --output_dir trl-hh-rlhf --dataset_name trl-internal-testing/hh-rlhf-helpful-base-trl-style
```

The DPO CLI is based on the `trl/scripts/dpo.py` script.
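Before launching a run, it can help to sanity-check that your dataset actually exposes these columns. A minimal sketch using πŸ€— Datasets (assuming the dataset has a `train` split) could look like this:

```python
# Hedged sketch: inspect one of the TRL-style preference datasets listed above
# and check that it exposes the "prompt", "chosen" and "rejected" columns.
from datasets import load_dataset

dataset = load_dataset("trl-internal-testing/hh-rlhf-helpful-base-trl-style", split="train")
print(dataset.column_names)   # expected to include "prompt", "chosen" and "rejected"
print(dataset[0]["chosen"])   # chosen response in chat format
```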
#### Custom preference dataset

Format the dataset into TRL format (you can adapt `examples/datasets/anthropic_hh.py`):

```bash
python examples/datasets/anthropic_hh.py --push_to_hub --hf_entity your-hf-org
```

## Chat interface

The chat CLI lets you quickly load the model and talk to it. Simply run the following:

<pre><code>$ trl chat --model_name_or_path Qwen/Qwen1.5-0.5B-Chat
<strong><span style="color: red;">&lt;quentin_gallouedec&gt;:</span></strong>
What is the best programming language?

<strong><span style="color: blue;">&lt;Qwen/Qwen1.5-0.5B-Chat&gt;:</span></strong>
There isn't a "best" programming language, as everyone has different style preferences, needs, and preferences. However, some people commonly use languages like Python, Java, C++, and JavaScript, which are popular among developers for a variety of reasons, including readability, flexibility, and scalability. Ultimately, it depends on personal preference, needs, and goals.
</code></pre>

Note that the chat interface relies on the tokenizer's [chat template](https://huggingface.co/docs/transformers/chat_templating) to format the inputs for the model. Make sure your tokenizer has a chat template defined.

Besides talking to the model, there are a few commands you can use:

- `clear`: clears the current conversation and starts a new one
- `example {NAME}`: loads the example named `{NAME}` from the config and uses it as the user input
- `set {SETTING_NAME}={SETTING_VALUE};`: changes the system prompt or generation settings (multiple settings are separated by a `;`).
- `reset`: same as `clear`, but also resets the generation configs to their defaults if they have been changed by `set`
- `save` or `save {SAVE_NAME}`: saves the current chat and settings to file (by default to `./chat_history/{MODEL_NAME}/chat_{DATETIME}.yaml`, or to `{SAVE_NAME}` if provided)
- `exit`: closes the interface

## Getting the system information

You can get the system information by running the following command:

```bash
trl env
```

This will print out the system information, including the GPU information, the CUDA version, the PyTorch version, the transformers version, the TRL version, and any optional dependencies that are installed.

```txt
Copy-paste the following information when reporting an issue:

- Platform: Linux-5.15.0-1048-aws-x86_64-with-glibc2.31
- Python version: 3.11.9
- PyTorch version: 2.4.1
- CUDA device: NVIDIA H100 80GB HBM3
- Transformers version: 4.45.0.dev0
- Accelerate version: 0.34.2
- Accelerate config: 
  - compute_environment: LOCAL_MACHINE
  - distributed_type: DEEPSPEED
  - mixed_precision: no
  - use_cpu: False
  - debug: False
  - num_processes: 4
  - machine_rank: 0
  - num_machines: 1
  - rdzv_backend: static
  - same_network: True
  - main_training_function: main
  - enable_cpu_affinity: False
  - deepspeed_config: {'gradient_accumulation_steps': 4, 'offload_optimizer_device': 'none', 'offload_param_device': 'none', 'zero3_init_flag': False, 'zero_stage': 2}
  - downcast_bf16: no
  - tpu_use_cluster: False
  - tpu_use_sudo: False
  - tpu_env: []
- Datasets version: 3.0.0
- HF Hub version: 0.24.7
- TRL version: 0.12.0.dev0+acb4d70
- bitsandbytes version: 0.41.1
- DeepSpeed version: 0.15.1
- Diffusers version: 0.30.3
- Liger-Kernel version: 0.3.0
- LLM-Blender version: 0.0.2
- OpenAI version: 1.46.0
- PEFT version: 0.12.0
```

This information is required when reporting an issue.
trl/docs/source/clis.md/0
{ "file_path": "trl/docs/source/clis.md", "repo_id": "trl", "token_count": 2201 }
# Iterative Trainer

[![](https://img.shields.io/badge/All_models-Iterative_SFT-blue)](https://huggingface.co/models?other=iterative-sft,trl)

Iterative fine-tuning is a training method that enables you to perform custom actions (for example, generation and filtering) between optimization steps. In TRL we provide an easy-to-use API to fine-tune your models in an iterative way in just a few lines of code.

## Usage

To get started quickly, instantiate a model and a tokenizer.

```python
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token

trainer = IterativeSFTTrainer(
    model,
    tokenizer
)
```

You can provide either a list of tensors or a list of strings to the `step` function.

#### Using a list of tensors as input:

```python
inputs = {
    "input_ids": input_ids,
    "attention_mask": attention_mask
}

trainer.step(**inputs)
```

#### Using a list of strings as input:

```python
inputs = {
    "texts": texts
}

trainer.step(**inputs)
```

For causal language models, labels will automatically be created from `input_ids` or from `texts`. When using sequence-to-sequence models, you will have to provide your own `labels` or `text_labels` (a short sketch is given at the end of this page).

## IterativeTrainer

[[autodoc]] IterativeSFTTrainer
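#### Sequence-to-sequence example (sketch)

As noted above, sequence-to-sequence models need explicit targets. Below is a minimal, hedged sketch of such a `step` call: the tensor variables are placeholders you would build with your own tokenizer, and the exact keyword used for text labels may differ depending on your TRL version.

```python
# Hedged sketch for a sequence-to-sequence model: the decoder targets ("labels")
# are provided explicitly instead of being derived from input_ids.
# All tensor variables below are placeholders.
inputs = {
    "input_ids": input_ids,            # encoder inputs
    "attention_mask": attention_mask,  # encoder attention mask
    "labels": labels,                  # decoder targets, provided by you
}

trainer.step(**inputs)
```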
trl/docs/source/iterative_sft_trainer.md/0
{ "file_path": "trl/docs/source/iterative_sft_trainer.md", "repo_id": "trl", "token_count": 441 }
# Reward Modeling

[![](https://img.shields.io/badge/All_models-Reward_Trainer-blue)](https://huggingface.co/models?other=reward-trainer,trl)

TRL supports custom reward modeling so that anyone can perform reward modeling on their own dataset and model.

Check out a complete, flexible example at [`examples/scripts/reward_modeling.py`](https://github.com/huggingface/trl/tree/main/examples/scripts/reward_modeling.py).

## Expected dataset type

The [`RewardTrainer`] requires an [*implicit prompt* preference dataset](dataset_formats#preference). This means that the dataset should only contain the columns `"chosen"` and `"rejected"` (and not `"prompt"`); a minimal example is shown at the end of this page.
The [`RewardTrainer`] supports both [conversational](dataset_formats#conversational) and [standard](dataset_formats#standard) dataset formats. When provided with a conversational dataset, the trainer will automatically apply the chat template to the dataset.

You can also use a pretokenized dataset, in which case the dataset should contain the following columns: `input_ids_chosen`, `attention_mask_chosen`, `input_ids_rejected` and `attention_mask_rejected`.

## Using the `RewardTrainer`

After preparing your dataset, you can use the [`RewardTrainer`] in the same way as the `Trainer` class from πŸ€— Transformers.
You should pass an `AutoModelForSequenceClassification` model to the [`RewardTrainer`], along with a [`RewardConfig`] which configures the hyperparameters of the training.

### Leveraging πŸ€— PEFT to train a reward model

Just pass a `peft_config` in the keyword arguments of [`RewardTrainer`], and the trainer should automatically take care of converting the model into a PEFT model!

```python
from peft import LoraConfig, TaskType
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from trl import RewardTrainer, RewardConfig

model = AutoModelForSequenceClassification.from_pretrained("gpt2")
peft_config = LoraConfig(
    task_type=TaskType.SEQ_CLS,
    inference_mode=False,
    r=8,
    lora_alpha=32,
    lora_dropout=0.1,
)

...

trainer = RewardTrainer(
    model=model,
    args=training_args,
    processing_class=tokenizer,
    train_dataset=dataset,
    peft_config=peft_config,
)

trainer.train()
```

### Adding a margin to the loss

As in the [Llama 2 paper](https://huggingface.co/papers/2307.09288), you can add a margin to the loss by adding a `margin` column to the dataset. The reward collator will automatically pass it through and the loss will be computed accordingly.

```python
def add_margin(row):
    # Assume you have score_chosen and score_rejected columns that you want to use to compute the margin
    return {'margin': row['score_chosen'] - row['score_rejected']}

dataset = dataset.map(add_margin)
```

### Centering rewards

In many scenarios, it's preferable to ensure that a reward model's output is mean zero. This is often done by first calculating the model's average score and then subtracting it.

[[Eisenstein et al., 2023]](https://huggingface.co/papers/2312.09244) proposed an auxiliary loss function designed to directly learn a centered reward model. This auxiliary loss minimizes the squared sum of the rewards, encouraging the model to naturally produce mean-zero outputs:

$$\Big( R(p, r_1) + R(p, r_2) \Big)^2 $$

This auxiliary loss is combined with the main loss function, weighted by the parameter `center_rewards_coefficient` in [`RewardConfig`]. By default, this feature is deactivated (`center_rewards_coefficient = None`).

```python
training_args = RewardConfig(
    center_rewards_coefficient=0.01,
    ...
)
```

For reference results, please refer to PR [#1932](https://github.com/huggingface/trl/pull/1932).

## RewardTrainer

[[autodoc]] RewardTrainer

## RewardConfig

[[autodoc]] RewardConfig
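For reference, here is a minimal, made-up example of the *implicit prompt* preference format described at the top of this page, in conversational form: each row carries only `chosen` and `rejected`, and the prompt is embedded in both conversations.

```python
# Hypothetical row of an implicit-prompt preference dataset (conversational form).
# There is no separate "prompt" column; the prompt appears inside both conversations.
example_row = {
    "chosen": [
        {"role": "user", "content": "How many hours are there in a day?"},
        {"role": "assistant", "content": "There are 24 hours in a day."},
    ],
    "rejected": [
        {"role": "user", "content": "How many hours are there in a day?"},
        {"role": "assistant", "content": "Around 12, I think."},
    ],
}
```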
trl/docs/source/reward_trainer.md/0
{ "file_path": "trl/docs/source/reward_trainer.md", "repo_id": "trl", "token_count": 1139 }
<jupyter_start><jupyter_text>Tune GPT2 to generate positive reviews> Optimise GPT2 to produce positive IMDB movie reviews using a BERT sentiment classifier as a reward function. Figure: Experiment setup to tune GPT2. The yellow arrows are outside the scope of this notebook, but the trained models are available through Hugging Face. In this notebook we fine-tune GPT2 (small) to generate positive movie reviews based on the IMDB dataset. The model gets the start of a real review and is tasked to produce positive continuations. To reward positive continuations we use a BERT classifier to analyse the sentiment of the produced sentences and use the classifier's outputs as reward signals for PPO training. Setup experiment Import dependencies<jupyter_code>%load_ext autoreload
%autoreload 2
%pip install transformers trl wandb
import torch
from tqdm import tqdm
import pandas as pd

tqdm.pandas()

from transformers import pipeline, AutoTokenizer
from datasets import load_dataset

from trl import PPOTrainer, PPOConfig, AutoModelForCausalLMWithValueHead
from trl.core import LengthSampler<jupyter_output><empty_output><jupyter_text>Configuration<jupyter_code>config = PPOConfig(
    model_name="lvwerra/gpt2-imdb",
    learning_rate=1.41e-5,
    log_with="wandb",
)

sent_kwargs = {"top_k": None, "function_to_apply": "none", "batch_size": 16}
import wandb

wandb.init()<jupyter_output><empty_output><jupyter_text>You can see that we load a GPT2 model called `gpt2_imdb`. This model was additionally fine-tuned on the IMDB dataset for 1 epoch with the Hugging Face [script](https://github.com/huggingface/transformers/blob/main/examples/legacy/run_language_modeling.py) (no special settings). The other parameters are mostly taken from the original paper ["Fine-Tuning Language Models from Human Preferences"](https://huggingface.co/papers/1909.08593). This model, as well as the BERT model, is available in the Hugging Face model zoo [here](https://huggingface.co/models). The following code should automatically download the models. Load data and models Load IMDB datasetThe IMDB dataset contains 50k movie reviews annotated with "positive"/"negative" feedback indicating the sentiment. We load the IMDB dataset into a DataFrame and filter for comments that are at least 200 characters. Then we tokenize each text and cut it to random size with the `LengthSampler`.<jupyter_code>def build_dataset(
    config,
    dataset_name="stanfordnlp/imdb",
    input_min_text_length=2,
    input_max_text_length=8,
):
    """
    Build dataset for training. This builds the dataset from `load_dataset`, one should
    customize this function to train the model on its own dataset.

    Args:
        dataset_name (`str`):
            The name of the dataset to be loaded.

    Returns:
        dataloader (`torch.utils.data.DataLoader`):
            The dataloader for the dataset.
""" tokenizer = AutoTokenizer.from_pretrained(config.model_name) tokenizer.pad_token = tokenizer.eos_token # load imdb with datasets ds = load_dataset(dataset_name, split="train") ds = ds.rename_columns({"text": "review"}) ds = ds.filter(lambda x: len(x["review"]) > 200, batched=False) input_size = LengthSampler(input_min_text_length, input_max_text_length) def tokenize(sample): sample["input_ids"] = tokenizer.encode(sample["review"])[: input_size()] sample["query"] = tokenizer.decode(sample["input_ids"]) return sample ds = ds.map(tokenize, batched=False) ds.set_format(type="torch") return ds dataset = build_dataset(config) def collator(data): return dict((key, [d[key] for d in data]) for key in data[0])<jupyter_output><empty_output><jupyter_text>Load pre-trained GPT2 language models We load the GPT2 model with a value head and the tokenizer. We load the model twice; the first model is optimized while the second model serves as a reference to calculate the KL-divergence from the starting point. This serves as an additional reward signal in the PPO training to make sure the optimized model does not deviate too much from the original language model.<jupyter_code>model = AutoModelForCausalLMWithValueHead.from_pretrained(config.model_name) ref_model = AutoModelForCausalLMWithValueHead.from_pretrained(config.model_name) tokenizer = AutoTokenizer.from_pretrained(config.model_name) tokenizer.pad_token = tokenizer.eos_token<jupyter_output><empty_output><jupyter_text>Initialize PPOTrainerThe `PPOTrainer` takes care of device placement and optimization later on:<jupyter_code>ppo_trainer = PPOTrainer( config, model, ref_model, tokenizer, dataset=dataset, data_collator=collator )<jupyter_output><empty_output><jupyter_text>Load BERT classifierWe load a BERT classifier fine-tuned on the IMDB dataset.<jupyter_code>device = ppo_trainer.accelerator.device if ppo_trainer.accelerator.num_processes == 1: device = 0 if torch.cuda.is_available() else "cpu" # to avoid a `pipeline` bug sentiment_pipe = pipeline( "sentiment-analysis", model="lvwerra/distilbert-imdb", device=device )<jupyter_output><empty_output><jupyter_text>The model outputs are the logits for the negative and positive class. We will use the logits for positive class as a reward signal for the language model.<jupyter_code>text = "this movie was really bad!!" sentiment_pipe(text, **sent_kwargs) text = "this movie was really good!!" sentiment_pipe(text, **sent_kwargs)<jupyter_output><empty_output><jupyter_text>Generation settingsFor the response generation we just use sampling and make sure top-k and nucleus sampling are turned off as well as a minimal length.<jupyter_code>gen_kwargs = { "min_length": -1, "top_k": 0.0, "top_p": 1.0, "do_sample": True, "pad_token_id": tokenizer.eos_token_id, }<jupyter_output><empty_output><jupyter_text>Optimize model Training loop The training loop consists of the following main steps:1. Get the query responses from the policy network (GPT-2)2. Get sentiments for query/responses from BERT3. 
Optimize policy with PPO using the (query, response, reward) triplet**Training time**This step takes **~2h** on a V100 GPU with the above specified settings.<jupyter_code>output_min_length = 4 output_max_length = 16 output_length_sampler = LengthSampler(output_min_length, output_max_length) generation_kwargs = { "min_length": -1, "top_k": 0.0, "top_p": 1.0, "do_sample": True, "pad_token_id": tokenizer.eos_token_id, } for epoch, batch in enumerate(tqdm(ppo_trainer.dataloader)): query_tensors = batch["input_ids"] #### Get response from gpt2 response_tensors = [] for query in query_tensors: gen_len = output_length_sampler() generation_kwargs["max_new_tokens"] = gen_len query_response = ppo_trainer.generate(query, **generation_kwargs).squeeze() response_len = len(query_response) - len(query) response_tensors.append(query_response[-response_len:]) batch["response"] = [tokenizer.decode(r.squeeze()) for r in response_tensors] #### Compute sentiment score texts = [q + r for q, r in zip(batch["query"], batch["response"])] pipe_outputs = sentiment_pipe(texts, **sent_kwargs) positive_scores = [ item["score"] for output in pipe_outputs for item in output if item["label"] == "POSITIVE" ] rewards = [torch.tensor(score) for score in positive_scores] #### Run PPO step stats = ppo_trainer.step(query_tensors, response_tensors, rewards) ppo_trainer.log_stats(stats, batch, rewards)<jupyter_output><empty_output><jupyter_text>Training progressIf you are tracking the training progress with Weights&Biases you should see a plot similar to the one below. Check out the interactive sample report on wandb.ai: [link](https://wandb.ai/huggingface/trl/runs/w9l3110g). Figure: Reward mean and distribution evolution during training. One can observe how the model starts to generate more positive outputs after a few optimisation steps.> Note: Investigating the KL-divergence will probably show that at this point the model has not converged to the target KL-divergence, yet. To get there would require longer training or starting with a higher initial coefficient. Model inspectionLet's inspect some examples from the IMDB dataset. 
We can use `ref_model` to compare the tuned model `model` against the model before optimisation.<jupyter_code>#### get a batch from the dataset bs = 16 game_data = dict() dataset.set_format("pandas") df_batch = dataset[:].sample(bs) game_data["query"] = df_batch["query"].tolist() query_tensors = df_batch["input_ids"].tolist() response_tensors_ref, response_tensors = [], [] #### get response from gpt2 and gpt2_ref for i in range(bs): query = torch.tensor(query_tensors[i]).to(device) gen_len = output_length_sampler() query_response = ref_model.generate( query.unsqueeze(0), max_new_tokens=gen_len, **gen_kwargs ).squeeze() response_len = len(query_response) - len(query) response_tensors_ref.append(query_response[-response_len:]) query_response = model.generate( query.unsqueeze(0), max_new_tokens=gen_len, **gen_kwargs ).squeeze() response_len = len(query_response) - len(query) response_tensors.append(query_response[-response_len:]) #### decode responses game_data["response (before)"] = [ tokenizer.decode(response_tensors_ref[i]) for i in range(bs) ] game_data["response (after)"] = [ tokenizer.decode(response_tensors[i]) for i in range(bs) ] #### sentiment analysis of query/response pairs before/after texts = [q + r for q, r in zip(game_data["query"], game_data["response (before)"])] pipe_outputs = sentiment_pipe(texts, **sent_kwargs) positive_scores = [ item["score"] for output in pipe_outputs for item in output if item["label"] == "POSITIVE" ] game_data["rewards (before)"] = positive_scores texts = [q + r for q, r in zip(game_data["query"], game_data["response (after)"])] pipe_outputs = sentiment_pipe(texts, **sent_kwargs) positive_scores = [ item["score"] for output in pipe_outputs for item in output if item["label"] == "POSITIVE" ] game_data["rewards (after)"] = positive_scores # store results in a dataframe df_results = pd.DataFrame(game_data) df_results<jupyter_output><empty_output><jupyter_text>Looking at the reward mean/median of the generated sequences we observe a significant difference.<jupyter_code>print("mean:") display(df_results[["rewards (before)", "rewards (after)"]].mean()) print() print("median:") display(df_results[["rewards (before)", "rewards (after)"]].median())<jupyter_output>mean:<jupyter_text>Save modelFinally, we save the model and push it to the Hugging Face for later usage.<jupyter_code>model.save_pretrained("gpt2-imdb-pos-v2", push_to_hub=True) tokenizer.save_pretrained("gpt2-imdb-pos-v2", push_to_hub=True)<jupyter_output><empty_output>
trl/examples/notebooks/gpt2-sentiment.ipynb/0
{ "file_path": "trl/examples/notebooks/gpt2-sentiment.ipynb", "repo_id": "trl", "token_count": 3695 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass, field from typing import Optional import torch from datasets import load_dataset from torch.optim import Adam from tqdm import tqdm from transformers import ( AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, RobertaForSequenceClassification, RobertaTokenizer, set_seed, ) from trl import AutoModelForCausalLMWithValueHead, PPOConfig, PPOTrainer, create_reference_model from trl.core import LengthSampler tqdm.pandas() ######################################################################## # This is a fully working simple example to use trl with accelerate. # # This example fine-tunes a GPTJ model to generate less toxic contents # by using allenai/real-toxicity-prompts dataset. We use PPO # (proximal policy optimization) to optimize the model. # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - multi GPUS (using DeepSpeed ZeRO-Offload stages 1 & 2) # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, first initialize the accelerate # configuration with `accelerate config` # ######################################################################## # We first define the configuration of the experiment, defining the model, the dataset, # the training parameters, and the PPO parameters. # Check the default arguments in the `PPOConfig` class for more details. # If you want to log with tensorboard, add the kwarg # `project_kwargs={"logging_dir": PATH_TO_LOGS}` to the PPOConfig. @dataclass class ScriptArguments: """ The name of the Casual LM model we wish to fine-tune with PPO """ # NOTE: gpt2 models use Conv1D instead of Linear layers which are not yet supported in 8 bit mode # models like gpt-neo* models are more suitable. 
model_name: Optional[str] = field(default="ybelkada/gpt-j-6b-sharded-bf16", metadata={"help": "the model name"}) log_with: Optional[str] = field(default=None, metadata={"help": "use 'wandb' to log with wandb"}) learning_rate: Optional[float] = field(default=(1.47e-5) * 2, metadata={"help": "the learning rate"}) mini_batch_size: Optional[int] = field(default=4, metadata={"help": "the PPO minibatch size"}) batch_size: Optional[int] = field(default=16, metadata={"help": "the batch size"}) gradient_accumulation_steps: Optional[int] = field( default=1, metadata={"help": "the number of gradient accumulation steps"} ) model_save_path: Optional[str] = field( default="./gpt-j-6B-detoxified-long-context-26-shl-1e4-final", metadata={"help": "the path to save the model"}, ) parser = HfArgumentParser(ScriptArguments) script_args = parser.parse_args_into_dataclasses()[0] config = PPOConfig( model_name=script_args.model_name, learning_rate=script_args.learning_rate, log_with=script_args.log_with, ppo_epochs=100, mini_batch_size=script_args.mini_batch_size, batch_size=script_args.batch_size, gradient_accumulation_steps=script_args.gradient_accumulation_steps, ) # Below is an example function to build the dataset. In our case, we use the IMDB dataset # from the `datasets` library. One should customize this function to train the model on # its own dataset. def build_dataset( config, dataset_name="allenai/real-toxicity-prompts", input_min_text_length=5, input_max_text_length=10 ): """ Build dataset for training. This builds the dataset from `load_dataset`, one should customize this function to train the model on its own dataset. Args: dataset_name (`str`): The name of the dataset to be loaded. Returns: dataloader (`torch.utils.data.DataLoader`): The dataloader for the dataset. """ tokenizer = AutoTokenizer.from_pretrained(config.model_name) tokenizer.pad_token = tokenizer.eos_token ds = load_dataset(dataset_name, split="train") def filter_fn(sample): toxicity = sample["prompt"]["toxicity"] return toxicity is not None and toxicity > 0.3 ds = ds.filter(filter_fn, batched=False) input_size = LengthSampler(input_min_text_length, input_max_text_length) def tokenize(sample): prompt = sample["prompt"]["text"] continuation = sample["continuation"]["text"] sample["input_ids"] = tokenizer.encode(prompt + continuation)[: input_size()] sample["query"] = tokenizer.decode(sample["input_ids"]) return sample ds = ds.map(tokenize, batched=False) ds.set_format(type="torch") ds = ds.train_test_split(test_size=0.2, shuffle=False)["train"] return ds # We retrieve the dataloader by calling the `build_dataset` function. min_input_length = 30 max_input_length = 40 dataset = build_dataset(config, input_min_text_length=min_input_length, input_max_text_length=max_input_length) def collator(data): return {key: [d[key] for d in data] for key in data[0]} # set seed before initializing value head for deterministic eval set_seed(config.seed) # Now let's build the model, the reference model, and the tokenizer. We first load the model # in bfloat16 to save memory using `transformers`. model = AutoModelForCausalLM.from_pretrained(config.model_name, torch_dtype=torch.bfloat16) # And then we pass the loaded model to `AutoModelForCausalLMWithValueHead`. model = AutoModelForCausalLMWithValueHead.from_pretrained(model) # We create a reference model by sharing 20 layers ref_model = create_reference_model(model, num_shared_layers=20) # We make sure to use `Adam` optimizer on the model parameters that require gradients. 
optimizer = Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=config.learning_rate) # GPT-2 / GPT-J tokenizer has a pad token, but it is not eos_token by default. We need to set it to eos_token. # only for this model. tokenizer = AutoTokenizer.from_pretrained(config.model_name) tokenizer.pad_token = tokenizer.eos_token # We then build the PPOTrainer, passing the model, the reference model, the tokenizer ppo_trainer = PPOTrainer( config, model, ref_model=ref_model, tokenizer=tokenizer, dataset=dataset, data_collator=collator, optimizer=optimizer, ) # We then build the reward pipeline, we will use the toxicity model to compute the reward. # We first load the toxicity model and tokenizer. toxicity_model_id = "facebook/roberta-hate-speech-dynabench-r4-target" toxicity_tokenizer = RobertaTokenizer.from_pretrained(toxicity_model_id) # We load the toxicity model in fp16 to save memory. toxicity_model = RobertaForSequenceClassification.from_pretrained(toxicity_model_id, torch_dtype=torch.float16).to( ppo_trainer.accelerator.device ) # We then define the arguments to pass to the `generate` function. These arguments # are passed to the `generate` function of the PPOTrainer, which is a wrapper around # the `generate` function of the trained model. generation_kwargs = { "min_length": -1, "top_k": 0.0, "top_p": 1.0, "do_sample": True, "pad_token_id": tokenizer.eos_token_id, } output_min_length = 20 output_max_length = 30 output_length_sampler = LengthSampler(output_min_length, output_max_length) model_save_path = script_args.model_save_path for epoch, batch in tqdm(enumerate(ppo_trainer.dataloader)): query_tensors = batch["input_ids"] # Get response from the policy model response_tensors = [] for query in query_tensors: gen_len = output_length_sampler() generation_kwargs["max_new_tokens"] = gen_len response = ppo_trainer.generate(query, **generation_kwargs) response_tensors.append(response.squeeze()[-gen_len:]) batch["response"] = [tokenizer.decode(r.squeeze()) for r in response_tensors] # Compute sentiment score texts = batch["response"] toxicity_inputs = toxicity_tokenizer(texts, padding=True, truncation=True, return_tensors="pt").to( ppo_trainer.accelerator.device ) logits = toxicity_model(**toxicity_inputs).logits.float() toxicity_labels = (logits[:, 0]).tolist() rewards = [torch.tensor(output) for output in toxicity_labels] # Run PPO step stats = ppo_trainer.step(query_tensors, response_tensors, rewards) ppo_trainer.log_stats(stats, batch, rewards) # Save model every 100 epochs if epoch % 100 == 0: if ppo_trainer.accelerator.is_main_process: ppo_trainer.save_pretrained(model_save_path)
trl/examples/research_projects/toxicity/scripts/gpt-j-6b-toxicity.py/0
{ "file_path": "trl/examples/research_projects/toxicity/scripts/gpt-j-6b-toxicity.py", "repo_id": "trl", "token_count": 3135 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Full training: python examples/scripts/prm.py \ --model_name_or_path Qwen/Qwen2-0.5B-Instruct \ --dataset_name trl-lib/prm800k \ --output_dir Qwen2-0.5B-Reward \ --per_device_train_batch_size 8 \ --num_train_epochs 1 \ --gradient_checkpointing True \ --learning_rate 1.0e-5 \ --logging_steps 25 \ --eval_strategy steps \ --eval_steps 50 LoRA: python examples/scripts/prm.py \ --model_name_or_path Qwen/Qwen2-0.5B-Instruct \ --dataset_name trl-lib/prm800k \ --output_dir Qwen2-0.5B-Reward-LoRA \ --per_device_train_batch_size 8 \ --num_train_epochs 1 \ --gradient_checkpointing True \ --learning_rate 1.0e-4 \ --logging_steps 25 \ --eval_strategy steps \ --eval_steps 50 --use_peft \ --lora_r 32 \ --lora_alpha 16 """ import warnings import torch from datasets import load_dataset from transformers import AutoModelForTokenClassification, AutoTokenizer, HfArgumentParser from trl import ( ModelConfig, PRMConfig, PRMTrainer, ScriptArguments, get_kbit_device_map, get_peft_config, get_quantization_config, ) if __name__ == "__main__": parser = HfArgumentParser((ScriptArguments, PRMConfig, ModelConfig)) script_args, training_args, model_config = parser.parse_args_into_dataclasses() training_args.gradient_checkpointing_kwargs = dict(use_reentrant=False) ################ # Model & Tokenizer ################ torch_dtype = ( model_config.torch_dtype if model_config.torch_dtype in ["auto", None] else getattr(torch, model_config.torch_dtype) ) quantization_config = get_quantization_config(model_config) model_kwargs = dict( revision=model_config.model_revision, device_map=get_kbit_device_map() if quantization_config is not None else None, quantization_config=quantization_config, use_cache=False if training_args.gradient_checkpointing else True, ) tokenizer = AutoTokenizer.from_pretrained( model_config.model_name_or_path, trust_remote_code=model_config.trust_remote_code, use_fast=True ) model = AutoModelForTokenClassification.from_pretrained( model_config.model_name_or_path, num_labels=2, trust_remote_code=model_config.trust_remote_code, **model_kwargs ) # Align padding tokens between tokenizer and model model.config.pad_token_id = tokenizer.pad_token_id if model_config.use_peft and model_config.lora_task_type != "TOKEN_CLS": warnings.warn( "You are using a `task_type` that is different than `TOKEN_CLS` for PEFT. 
This will lead to silent bugs" " Make sure to pass --lora_task_type TOKEN_CLS when using this script with PEFT.", UserWarning, ) ############## # Load dataset ############## dataset = load_dataset(script_args.dataset_name, name=script_args.dataset_config) dataset = dataset.filter(lambda x: len(x["completions"]) > 0) ########## # Training ########## trainer = PRMTrainer( model=model, processing_class=tokenizer, args=training_args, train_dataset=dataset[script_args.dataset_train_split], eval_dataset=dataset[script_args.dataset_test_split], peft_config=get_peft_config(model_config), ) trainer.train() ############################ # Save model and push to Hub ############################ trainer.save_model(training_args.output_dir) metrics = trainer.evaluate() trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) # Save and push to hub trainer.save_model(training_args.output_dir) if training_args.push_to_hub: trainer.push_to_hub(dataset_name=script_args.dataset_name)
trl/examples/scripts/prm.py/0
{ "file_path": "trl/examples/scripts/prm.py", "repo_id": "trl", "token_count": 1702 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import torch from transformers import AutoTokenizer from trl import DataCollatorForCompletionOnlyLM class DataCollatorForCompletionOnlyLMTester(unittest.TestCase): def test_data_collator_finds_response_template_llama2_tokenizer(self): # this should ideally be tested with meta-llama/Llama-2-7b-hf self.tokenizer = AutoTokenizer.from_pretrained("trl-internal-testing/tiny-Qwen2ForCausalLM-2.5") self.instruction = """### System: You are a helpful assistant. ### User: How much is 2+2? ### Assistant: 2+2 equals 4""" self.instruction_template = "\n### User:" self.response_template = "\n### Assistant:" # GPT2Tokenizer: [198, 21017, 11787, 25] -> [21017, 11787, 25] # Llama2Tokenizer: [29871, 13, 2277, 29937, 4911, 29901] -> [2277, 29937, 4911, 29901] # Note: If this test is ever switched to Llama2Tokenizer, this should be double checked, # and possibly switched back to [2:] instead of [1:]. # With GPT2Tokenizer, [1:] is correct - we want the 21017 token included, which is ###. self.tokenized_instruction_w_context = self.tokenizer.encode( self.instruction_template, add_special_tokens=False )[1:] # GPT2Tokenizer: [198, 21017, 15286, 25] -> [15286, 25] # Llama2Tokenizer: [29871, 13, 2277, 29937, 4007, 22137, 29901] -> [2277, 29937, 4007, 22137, 29901] self.tokenized_response_w_context = self.tokenizer.encode(self.response_template, add_special_tokens=False)[2:] # Plain check on string self.assertIn(self.response_template, self.instruction) self.tokenized_instruction = self.tokenizer.encode(self.instruction, add_special_tokens=False) # Test the fix for #598 # Pass already tokenized (w context) and truncated response_template so token_ids are like in the instruction + response self.collator = DataCollatorForCompletionOnlyLM(self.tokenized_response_w_context, tokenizer=self.tokenizer) self.collator.torch_call([self.tokenized_instruction]) # Test for PR #749 # Pass already tokenized (w context) instruction and response both so token_ids are like in the instruction + response self.collator = DataCollatorForCompletionOnlyLM( self.tokenized_response_w_context, self.tokenized_instruction_w_context, tokenizer=self.tokenizer ) self.collator.torch_call([self.tokenized_instruction]) # Test for PR #1185 # We pass in a string where the first user template is different than the rest. # Usually this would happen due to context-sensitive tokenization, but here we # explicitly change the template to test the fix. 
self.instruction = """## User: First instruction ### Assistant: First response ### User: Second instruction ### Assistant: Second response""" self.tokenized_instruction = self.tokenizer.encode(self.instruction, add_special_tokens=False) self.collator = DataCollatorForCompletionOnlyLM( self.tokenized_response_w_context, self.tokenized_instruction_w_context, tokenizer=self.tokenizer ) collator_output = self.collator.torch_call([self.tokenized_instruction]) collator_text = self.tokenizer.decode( collator_output["labels"][torch.where(collator_output["labels"] != -100)] ) expected_text = " First response\n\n Second response" "" self.assertEqual(collator_text, expected_text) def test_data_collator_handling_of_long_sequences(self): self.tokenizer = AutoTokenizer.from_pretrained("trl-internal-testing/tiny-Qwen2ForCausalLM-2.5") self.instruction = """### System: You are a helpful assistant. ### User: How much is 2+2? I'm asking because I'm not sure. And I'm not sure because I'm not good at math. """ self.response_template = "\n### Assistant:" # check DataCollatorForCompletionOnlyLM using response template only self.tokenized_instruction = self.tokenizer.encode(self.instruction, add_special_tokens=False) self.collator = DataCollatorForCompletionOnlyLM(self.response_template, tokenizer=self.tokenizer) encoded_instance = self.collator.torch_call([self.tokenized_instruction]) result = torch.all(encoded_instance["labels"] == -100) self.assertTrue(result, "Not all values in the tensor are -100.") # check DataCollatorForCompletionOnlyLM using response template and instruction template self.instruction_template = "\n### User:" self.collator = DataCollatorForCompletionOnlyLM( self.response_template, self.instruction_template, tokenizer=self.tokenizer ) encoded_instance = self.collator.torch_call([self.tokenized_instruction]) result = torch.all(encoded_instance["labels"] == -100) self.assertTrue(result, "Not all values in the tensor are -100.") def test_padding_free(self): tokenizer = AutoTokenizer.from_pretrained("trl-internal-testing/tiny-Qwen2ForCausalLM-2.5") if tokenizer.pad_token_id is None: tokenizer.pad_token = tokenizer.eos_token tokenizer.pad_token_id = tokenizer.eos_token_id inst1 = "### System: You are a helpful assistant.\n\n### User: How much is 2+2?\n\n### Assistant: 2+2 equals 4" inst2 = "### System: You are a honest and helpful assistant.\n\n### User: What is the answer of 22x22?\n\n### Assistant: 22x22 equals 484" response_template = "\n\n### Assistant:" collator = DataCollatorForCompletionOnlyLM(response_template, tokenizer=tokenizer) collator_paddingfree = DataCollatorForCompletionOnlyLM( response_template, tokenizer=tokenizer, padding_free=True ) tokenized_instruction = [tokenizer(x, add_special_tokens=False) for x in [inst1, inst2]] batch = collator(tokenized_instruction) batch_paddingfree = collator_paddingfree(tokenized_instruction) self.assertNotIn("attention_mask", batch_paddingfree) self.assertIn("input_ids", batch_paddingfree) self.assertIn("labels", batch_paddingfree) self.assertIn("position_ids", batch_paddingfree) self.assertEqual(batch_paddingfree["input_ids"].size(), batch_paddingfree["labels"].size()) self.assertEqual(batch_paddingfree["labels"].size(), batch_paddingfree["position_ids"].size()) attn_mask = batch["attention_mask"] input_ids_remove_pad = batch["input_ids"][attn_mask.bool()].unsqueeze(0) expected_position_ids = attn_mask.cumsum(1)[attn_mask.bool()].unsqueeze(0) - 1 expected_labels = [] for idx in range(batch["input_ids"].size(0)): 
expected_labels.append(batch["labels"][idx][attn_mask[idx].bool()]) expected_labels[-1][0] = collator.ignore_index expected_labels = torch.cat(expected_labels).unsqueeze(0) self.assertTrue((input_ids_remove_pad == batch_paddingfree["input_ids"]).all()) self.assertTrue((expected_position_ids == batch_paddingfree["position_ids"]).all()) self.assertTrue((expected_labels == batch_paddingfree["labels"]).all()) def test_data_collator_for_completion_only_lm(self): # The tokenizer isn't use but the collator needs it to be provided. tokenizer = AutoTokenizer.from_pretrained("trl-internal-testing/tiny-Qwen2ForCausalLM-2.5") collator = DataCollatorForCompletionOnlyLM(tokenizer.decode(9999), tokenizer=tokenizer, padding_free=True) tokenized_instruction = [ {"input_ids": [1, 2, 3, 9999, 4, 5], "attention_mask": [1, 1, 1, 1, 1, 1]}, {"input_ids": [6, 7, 8, 9, 9999, 10, 11], "attention_mask": [1, 1, 1, 1, 1, 1, 1]}, ] batch = collator(tokenized_instruction) self.assertEqual(batch["position_ids"].tolist(), [[0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 6]]) # flat pos ids self.assertEqual(batch["cu_seq_lens_q"].tolist(), [0, 6, 13]) # start idx of each seq + total number of tokens self.assertEqual(batch["cu_seq_lens_k"].tolist(), [0, 6, 13]) # idem self.assertEqual(batch["max_length_k"], 7) # max length in batch, here 7 (second sequence) self.assertEqual(batch["max_length_q"], 7) # idem
trl/tests/test_data_collator_completion_only.py/0
{ "file_path": "trl/tests/test_data_collator_completion_only.py", "repo_id": "trl", "token_count": 3404 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import tempfile import unittest import torch from transformers import AutoModelForCausalLM from transformers.testing_utils import ( require_peft, require_torch_gpu_if_bnb_not_multi_backend_enabled, ) from transformers.utils import is_peft_available from trl import AutoModelForCausalLMWithValueHead if is_peft_available(): from peft import LoraConfig, get_peft_model @require_peft class PeftModelTester(unittest.TestCase): def setUp(self): self.causal_lm_model_id = "trl-internal-testing/tiny-Qwen2ForCausalLM-2.5" self.lora_config = LoraConfig( r=16, lora_alpha=32, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) def test_create_peft_model(self): r""" Simply creates a peft model and checks that it can be loaded. """ causal_lm_model = AutoModelForCausalLM.from_pretrained(self.causal_lm_model_id) pretrained_model = get_peft_model(causal_lm_model, self.lora_config) _ = AutoModelForCausalLMWithValueHead.from_pretrained(pretrained_model) def test_peft_requires_grad(self): r""" Check that the value head of the returned model has requires_grad=True. """ causal_lm_model = AutoModelForCausalLM.from_pretrained(self.causal_lm_model_id) pretrained_model = get_peft_model(causal_lm_model, self.lora_config) model = AutoModelForCausalLMWithValueHead.from_pretrained(pretrained_model) # Check that the value head has requires_grad=True self.assertTrue(model.v_head.summary.weight.requires_grad) def test_check_peft_model_nb_trainable_params(self): r""" Check that the number of trainable parameters is correct. """ causal_lm_model = AutoModelForCausalLM.from_pretrained(self.causal_lm_model_id) pretrained_model = get_peft_model(causal_lm_model, self.lora_config) model = AutoModelForCausalLMWithValueHead.from_pretrained(pretrained_model) # Check that the number of trainable parameters is correct nb_trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad) self.assertEqual(nb_trainable_params, 905) # Check that the number of trainable param for the non-peft model is correct non_peft_model = AutoModelForCausalLMWithValueHead.from_pretrained(self.causal_lm_model_id) nb_trainable_params = sum(p.numel() for p in non_peft_model.parameters() if p.requires_grad) self.assertEqual(nb_trainable_params, 2428641) def test_create_peft_model_from_config(self): r""" Simply creates a peft model and checks that it can be loaded. 
""" trl_model = AutoModelForCausalLMWithValueHead.from_pretrained( self.causal_lm_model_id, peft_config=self.lora_config ) # Check that the number of trainable parameters is correct nb_trainable_params = sum(p.numel() for p in trl_model.parameters() if p.requires_grad) self.assertEqual(nb_trainable_params, 905) causal_lm_model = AutoModelForCausalLM.from_pretrained(self.causal_lm_model_id) trl_model = AutoModelForCausalLMWithValueHead.from_pretrained(causal_lm_model, peft_config=self.lora_config) # Check that the number of trainable parameters is correct nb_trainable_params = sum(p.numel() for p in trl_model.parameters() if p.requires_grad) self.assertEqual(nb_trainable_params, 905) @require_torch_gpu_if_bnb_not_multi_backend_enabled def test_create_bnb_peft_model_from_config(self): r""" Simply creates a peft model and checks that it can be loaded. """ from bitsandbytes.nn import Linear8bitLt trl_model = AutoModelForCausalLMWithValueHead.from_pretrained( self.causal_lm_model_id, peft_config=self.lora_config, load_in_8bit=True ) # Check that the number of trainable parameters is correct nb_trainable_params = sum(p.numel() for p in trl_model.parameters() if p.requires_grad) self.assertEqual(nb_trainable_params, 905) self.assertEqual(trl_model.pretrained_model.model.gpt_neox.layers[0].mlp.dense_h_to_4h.__class__, Linear8bitLt) causal_lm_model = AutoModelForCausalLM.from_pretrained( self.causal_lm_model_id, load_in_8bit=True, device_map="auto" ) trl_model = AutoModelForCausalLMWithValueHead.from_pretrained(causal_lm_model, peft_config=self.lora_config) # Check that the number of trainable parameters is correct nb_trainable_params = sum(p.numel() for p in trl_model.parameters() if p.requires_grad) self.assertEqual(nb_trainable_params, 905) self.assertEqual(trl_model.pretrained_model.model.gpt_neox.layers[0].mlp.dense_h_to_4h.__class__, Linear8bitLt) def test_save_pretrained_peft(self): r""" Check that the model can be saved and loaded properly. """ causal_lm_model = AutoModelForCausalLM.from_pretrained(self.causal_lm_model_id) pretrained_model = get_peft_model(causal_lm_model, self.lora_config) model = AutoModelForCausalLMWithValueHead.from_pretrained(pretrained_model) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir) # check that the files `adapter_model.safetensors` and `adapter_config.json` are in the directory self.assertTrue( os.path.isfile(f"{tmp_dir}/adapter_model.safetensors"), f"{tmp_dir}/adapter_model.safetensors does not exist", ) self.assertTrue( os.path.exists(f"{tmp_dir}/adapter_config.json"), f"{tmp_dir}/adapter_config.json does not exist" ) # check also for `pytorch_model.bin` and make sure it only contains `v_head` weights self.assertTrue( os.path.exists(f"{tmp_dir}/pytorch_model.bin"), f"{tmp_dir}/pytorch_model.bin does not exist" ) # check that only keys that starts with `v_head` are in the dict maybe_v_head = torch.load(f"{tmp_dir}/pytorch_model.bin", weights_only=True) self.assertTrue( all(k.startswith("v_head") for k in maybe_v_head.keys()), f"keys in {tmp_dir}/pytorch_model.bin do not start with `v_head`", ) model_from_pretrained = AutoModelForCausalLMWithValueHead.from_pretrained(tmp_dir) # check all the weights are the same for p1, p2 in zip(model.named_parameters(), model_from_pretrained.named_parameters()): self.assertTrue(torch.allclose(p1[1], p2[1]), f"{p1[0]} != {p2[0]}") def test_load_pretrained_peft(self): r""" Check that the model saved with peft class interface can be loaded properly. 
""" causal_lm_model = AutoModelForCausalLM.from_pretrained(self.causal_lm_model_id) pretrained_model = get_peft_model(causal_lm_model, self.lora_config) model = AutoModelForCausalLMWithValueHead.from_pretrained(pretrained_model) with tempfile.TemporaryDirectory() as tmp_dir: pretrained_model.save_pretrained(tmp_dir) model_from_pretrained = AutoModelForCausalLMWithValueHead.from_pretrained(tmp_dir) # check that the files `adapter_model.safetensors` and `adapter_config.json` are in the directory self.assertTrue( os.path.isfile(f"{tmp_dir}/adapter_model.safetensors"), f"{tmp_dir}/adapter_model.safetensors does not exist", ) self.assertTrue( os.path.exists(f"{tmp_dir}/adapter_config.json"), f"{tmp_dir}/adapter_config.json does not exist" ) # check all the weights are the same for p1, p2 in zip(model.named_parameters(), model_from_pretrained.named_parameters()): if p1[0] not in ["v_head.summary.weight", "v_head.summary.bias"]: self.assertTrue(torch.allclose(p1[1], p2[1]), f"{p1[0]} != {p2[0]}") def test_continue_training_peft_model(self): r""" Load peft and checks that it can continue training. """ causal_lm_model = AutoModelForCausalLM.from_pretrained(self.causal_lm_model_id) pretrained_model = get_peft_model(causal_lm_model, self.lora_config) with tempfile.TemporaryDirectory() as tmp_dir: pretrained_model.save_pretrained(tmp_dir) # set is_trainable to True model = AutoModelForCausalLMWithValueHead.from_pretrained(tmp_dir, is_trainable=True) # Check that the number of trainable parameters is correct nb_trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad) self.assertEqual(nb_trainable_params, 905)
trl/tests/test_peft_models.py/0
{ "file_path": "trl/tests/test_peft_models.py", "repo_id": "trl", "token_count": 4092 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ # Full training python trl/scripts/dpo.py \ --dataset_name trl-lib/ultrafeedback_binarized \ --model_name_or_path Qwen/Qwen2-0.5B-Instruct \ --learning_rate 5.0e-7 \ --num_train_epochs 1 \ --per_device_train_batch_size 2 \ --gradient_accumulation_steps 8 \ --gradient_checkpointing \ --logging_steps 25 \ --eval_strategy steps \ --eval_steps 50 \ --output_dir Qwen2-0.5B-DPO \ --no_remove_unused_columns # LoRA: python trl/scripts/dpo.py \ --dataset_name trl-lib/ultrafeedback_binarized \ --model_name_or_path Qwen/Qwen2-0.5B-Instruct \ --learning_rate 5.0e-6 \ --num_train_epochs 1 \ --per_device_train_batch_size 2 \ --gradient_accumulation_steps 8 \ --gradient_checkpointing \ --logging_steps 25 \ --eval_strategy steps \ --eval_steps 50 \ --output_dir Qwen2-0.5B-DPO \ --no_remove_unused_columns \ --use_peft \ --lora_r 32 \ --lora_alpha 16 """ import argparse import torch from datasets import load_dataset from transformers import AutoModelForCausalLM, AutoTokenizer from trl import ( DPOConfig, DPOTrainer, ModelConfig, ScriptArguments, TrlParser, get_kbit_device_map, get_peft_config, get_quantization_config, ) from trl.trainer.utils import SIMPLE_CHAT_TEMPLATE def main(script_args, training_args, model_args): ################ # Model & Tokenizer ################### torch_dtype = ( model_args.torch_dtype if model_args.torch_dtype in ["auto", None] else getattr(torch, model_args.torch_dtype) ) quantization_config = get_quantization_config(model_args) model_kwargs = dict( revision=model_args.model_revision, attn_implementation=model_args.attn_implementation, torch_dtype=torch_dtype, use_cache=False if training_args.gradient_checkpointing else True, device_map=get_kbit_device_map() if quantization_config is not None else None, quantization_config=quantization_config, ) model = AutoModelForCausalLM.from_pretrained( model_args.model_name_or_path, trust_remote_code=model_args.trust_remote_code, **model_kwargs ) peft_config = get_peft_config(model_args) if peft_config is None: ref_model = AutoModelForCausalLM.from_pretrained( model_args.model_name_or_path, trust_remote_code=model_args.trust_remote_code, **model_kwargs ) else: ref_model = None tokenizer = AutoTokenizer.from_pretrained( model_args.model_name_or_path, trust_remote_code=model_args.trust_remote_code ) if tokenizer.pad_token is None: tokenizer.pad_token = tokenizer.eos_token if tokenizer.chat_template is None: tokenizer.chat_template = SIMPLE_CHAT_TEMPLATE if script_args.ignore_bias_buffers: # torch distributed hack model._ddp_params_and_buffers_to_ignore = [ name for name, buffer in model.named_buffers() if buffer.dtype == torch.bool ] ################ # Dataset ################ dataset = load_dataset(script_args.dataset_name, name=script_args.dataset_config) ########## # Training ################ trainer = DPOTrainer( model, ref_model, args=training_args, train_dataset=dataset[script_args.dataset_train_split], 
eval_dataset=dataset[script_args.dataset_test_split] if training_args.eval_strategy != "no" else None, processing_class=tokenizer, peft_config=peft_config, ) trainer.train() if training_args.eval_strategy != "no": metrics = trainer.evaluate() trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) # Save and push to hub trainer.save_model(training_args.output_dir) if training_args.push_to_hub: trainer.push_to_hub(dataset_name=script_args.dataset_name) def make_parser(subparsers: argparse._SubParsersAction = None): dataclass_types = (ScriptArguments, DPOConfig, ModelConfig) if subparsers is not None: parser = subparsers.add_parser("dpo", help="Run the DPO training script", dataclass_types=dataclass_types) else: parser = TrlParser(dataclass_types) return parser if __name__ == "__main__": parser = make_parser() script_args, training_args, model_args = parser.parse_args_and_config() main(script_args, training_args, model_args)
trl/trl/scripts/dpo.py/0
{ "file_path": "trl/trl/scripts/dpo.py", "repo_id": "trl", "token_count": 2024 }
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import textwrap
from collections import defaultdict
from concurrent import futures
from typing import Any, Callable, Optional, Union
from warnings import warn

import torch
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import ProjectConfiguration, set_seed
from huggingface_hub import PyTorchModelHubMixin
from transformers import is_wandb_available

from ..models import DDPOStableDiffusionPipeline
from .ddpo_config import DDPOConfig
from .utils import PerPromptStatTracker, generate_model_card, get_comet_experiment_url


if is_wandb_available():
    import wandb

logger = get_logger(__name__)


class DDPOTrainer(PyTorchModelHubMixin):
    """
    The DDPOTrainer uses Denoising Diffusion Policy Optimization (DDPO) to optimize diffusion models.
    Note, this trainer is heavily inspired by the work here: https://github.com/kvablack/ddpo-pytorch
    As of now, only Stable Diffusion-based pipelines are supported.

    Attributes:
        **config** (`DDPOConfig`) -- Configuration object for DDPOTrainer. Check the documentation of `DDPOConfig` for more
            details.
        **reward_function** (Callable[[torch.Tensor, tuple[str], tuple[Any]], torch.Tensor]) -- Reward function to be used
        **prompt_function** (Callable[[], tuple[str, Any]]) -- Function to generate prompts to guide model
        **sd_pipeline** (`DDPOStableDiffusionPipeline`) -- Stable Diffusion pipeline to be used for training.
**image_samples_hook** (Optional[Callable[[Any, Any, Any], Any]]) -- Hook to be called to log images """ _tag_names = ["trl", "ddpo"] def __init__( self, config: DDPOConfig, reward_function: Callable[[torch.Tensor, tuple[str], tuple[Any]], torch.Tensor], prompt_function: Callable[[], tuple[str, Any]], sd_pipeline: DDPOStableDiffusionPipeline, image_samples_hook: Optional[Callable[[Any, Any, Any], Any]] = None, ): if image_samples_hook is None: warn("No image_samples_hook provided; no images will be logged") self.prompt_fn = prompt_function self.reward_fn = reward_function self.config = config self.image_samples_callback = image_samples_hook accelerator_project_config = ProjectConfiguration(**self.config.project_kwargs) if self.config.resume_from: self.config.resume_from = os.path.normpath(os.path.expanduser(self.config.resume_from)) if "checkpoint_" not in os.path.basename(self.config.resume_from): # get the most recent checkpoint in this directory checkpoints = list( filter( lambda x: "checkpoint_" in x, os.listdir(self.config.resume_from), ) ) if len(checkpoints) == 0: raise ValueError(f"No checkpoints found in {self.config.resume_from}") checkpoint_numbers = sorted([int(x.split("_")[-1]) for x in checkpoints]) self.config.resume_from = os.path.join( self.config.resume_from, f"checkpoint_{checkpoint_numbers[-1]}", ) accelerator_project_config.iteration = checkpoint_numbers[-1] + 1 # number of timesteps within each trajectory to train on self.num_train_timesteps = int(self.config.sample_num_steps * self.config.train_timestep_fraction) self.accelerator = Accelerator( log_with=self.config.log_with, mixed_precision=self.config.mixed_precision, project_config=accelerator_project_config, # we always accumulate gradients across timesteps; we want config.train.gradient_accumulation_steps to be the # number of *samples* we accumulate across, so we need to multiply by the number of training timesteps to get # the total number of optimizer steps to accumulate across. gradient_accumulation_steps=self.config.train_gradient_accumulation_steps * self.num_train_timesteps, **self.config.accelerator_kwargs, ) is_okay, message = self._config_check() if not is_okay: raise ValueError(message) is_using_tensorboard = config.log_with is not None and config.log_with == "tensorboard" if self.accelerator.is_main_process: self.accelerator.init_trackers( self.config.tracker_project_name, config=dict(ddpo_trainer_config=config.to_dict()) if not is_using_tensorboard else config.to_dict(), init_kwargs=self.config.tracker_kwargs, ) logger.info(f"\n{config}") set_seed(self.config.seed, device_specific=True) self.sd_pipeline = sd_pipeline self.sd_pipeline.set_progress_bar_config( position=1, disable=not self.accelerator.is_local_main_process, leave=False, desc="Timestep", dynamic_ncols=True, ) # For mixed precision training we cast all non-trainable weights (vae, non-lora text_encoder and non-lora unet) to half-precision # as these weights are only used for inference, keeping weights in full precision is not required. 
if self.accelerator.mixed_precision == "fp16": inference_dtype = torch.float16 elif self.accelerator.mixed_precision == "bf16": inference_dtype = torch.bfloat16 else: inference_dtype = torch.float32 self.sd_pipeline.vae.to(self.accelerator.device, dtype=inference_dtype) self.sd_pipeline.text_encoder.to(self.accelerator.device, dtype=inference_dtype) self.sd_pipeline.unet.to(self.accelerator.device, dtype=inference_dtype) trainable_layers = self.sd_pipeline.get_trainable_layers() self.accelerator.register_save_state_pre_hook(self._save_model_hook) self.accelerator.register_load_state_pre_hook(self._load_model_hook) # Enable TF32 for faster training on Ampere GPUs, # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices if self.config.allow_tf32: torch.backends.cuda.matmul.allow_tf32 = True self.optimizer = self._setup_optimizer( trainable_layers.parameters() if not isinstance(trainable_layers, list) else trainable_layers ) self.neg_prompt_embed = self.sd_pipeline.text_encoder( self.sd_pipeline.tokenizer( [""] if self.config.negative_prompts is None else self.config.negative_prompts, return_tensors="pt", padding="max_length", truncation=True, max_length=self.sd_pipeline.tokenizer.model_max_length, ).input_ids.to(self.accelerator.device) )[0] if config.per_prompt_stat_tracking: self.stat_tracker = PerPromptStatTracker( config.per_prompt_stat_tracking_buffer_size, config.per_prompt_stat_tracking_min_count, ) # NOTE: for some reason, autocast is necessary for non-lora training but for lora training it isn't necessary and it uses # more memory self.autocast = self.sd_pipeline.autocast or self.accelerator.autocast if hasattr(self.sd_pipeline, "use_lora") and self.sd_pipeline.use_lora: unet, self.optimizer = self.accelerator.prepare(trainable_layers, self.optimizer) self.trainable_layers = list(filter(lambda p: p.requires_grad, unet.parameters())) else: self.trainable_layers, self.optimizer = self.accelerator.prepare(trainable_layers, self.optimizer) if self.config.async_reward_computation: self.executor = futures.ThreadPoolExecutor(max_workers=config.max_workers) if config.resume_from: logger.info(f"Resuming from {config.resume_from}") self.accelerator.load_state(config.resume_from) self.first_epoch = int(config.resume_from.split("_")[-1]) + 1 else: self.first_epoch = 0 def compute_rewards(self, prompt_image_pairs, is_async=False): if not is_async: rewards = [] for images, prompts, prompt_metadata in prompt_image_pairs: reward, reward_metadata = self.reward_fn(images, prompts, prompt_metadata) rewards.append( ( torch.as_tensor(reward, device=self.accelerator.device), reward_metadata, ) ) else: rewards = self.executor.map(lambda x: self.reward_fn(*x), prompt_image_pairs) rewards = [ (torch.as_tensor(reward.result(), device=self.accelerator.device), reward_metadata.result()) for reward, reward_metadata in rewards ] return zip(*rewards) def step(self, epoch: int, global_step: int): """ Perform a single step of training. Args: epoch (int): The current epoch. global_step (int): The current global step. Side Effects: - Model weights are updated - Logs the statistics to the accelerator trackers. - If `self.image_samples_callback` is not None, it will be called with the prompt_image_pairs, global_step, and the accelerator tracker. Returns: global_step (int): The updated global step. 
""" samples, prompt_image_data = self._generate_samples( iterations=self.config.sample_num_batches_per_epoch, batch_size=self.config.sample_batch_size, ) # collate samples into dict where each entry has shape (num_batches_per_epoch * sample.batch_size, ...) samples = {k: torch.cat([s[k] for s in samples]) for k in samples[0].keys()} rewards, rewards_metadata = self.compute_rewards( prompt_image_data, is_async=self.config.async_reward_computation ) for i, image_data in enumerate(prompt_image_data): image_data.extend([rewards[i], rewards_metadata[i]]) if self.image_samples_callback is not None: self.image_samples_callback(prompt_image_data, global_step, self.accelerator.trackers[0]) rewards = torch.cat(rewards) rewards = self.accelerator.gather(rewards).cpu().numpy() self.accelerator.log( { "reward": rewards, "epoch": epoch, "reward_mean": rewards.mean(), "reward_std": rewards.std(), }, step=global_step, ) if self.config.per_prompt_stat_tracking: # gather the prompts across processes prompt_ids = self.accelerator.gather(samples["prompt_ids"]).cpu().numpy() prompts = self.sd_pipeline.tokenizer.batch_decode(prompt_ids, skip_special_tokens=True) advantages = self.stat_tracker.update(prompts, rewards) else: advantages = (rewards - rewards.mean()) / (rewards.std() + 1e-8) # ungather advantages; keep the entries corresponding to the samples on this process samples["advantages"] = ( torch.as_tensor(advantages) .reshape(self.accelerator.num_processes, -1)[self.accelerator.process_index] .to(self.accelerator.device) ) del samples["prompt_ids"] total_batch_size, num_timesteps = samples["timesteps"].shape for inner_epoch in range(self.config.train_num_inner_epochs): # shuffle samples along batch dimension perm = torch.randperm(total_batch_size, device=self.accelerator.device) samples = {k: v[perm] for k, v in samples.items()} # shuffle along time dimension independently for each sample # still trying to understand the code below perms = torch.stack( [torch.randperm(num_timesteps, device=self.accelerator.device) for _ in range(total_batch_size)] ) for key in ["timesteps", "latents", "next_latents", "log_probs"]: samples[key] = samples[key][ torch.arange(total_batch_size, device=self.accelerator.device)[:, None], perms, ] original_keys = samples.keys() original_values = samples.values() # rebatch them as user defined train_batch_size is different from sample_batch_size reshaped_values = [v.reshape(-1, self.config.train_batch_size, *v.shape[1:]) for v in original_values] # Transpose the list of original values transposed_values = zip(*reshaped_values) # Create new dictionaries for each row of transposed values samples_batched = [dict(zip(original_keys, row_values)) for row_values in transposed_values] self.sd_pipeline.unet.train() global_step = self._train_batched_samples(inner_epoch, epoch, global_step, samples_batched) # ensure optimization step at the end of the inner epoch if not self.accelerator.sync_gradients: raise ValueError( "Optimization step should have been performed by this point. Please check calculated gradient accumulation settings." 
) if epoch != 0 and epoch % self.config.save_freq == 0 and self.accelerator.is_main_process: self.accelerator.save_state() return global_step def calculate_loss(self, latents, timesteps, next_latents, log_probs, advantages, embeds): """ Calculate the loss for a batch of an unpacked sample Args: latents (torch.Tensor): The latents sampled from the diffusion model, shape: [batch_size, num_channels_latents, height, width] timesteps (torch.Tensor): The timesteps sampled from the diffusion model, shape: [batch_size] next_latents (torch.Tensor): The next latents sampled from the diffusion model, shape: [batch_size, num_channels_latents, height, width] log_probs (torch.Tensor): The log probabilities of the latents, shape: [batch_size] advantages (torch.Tensor): The advantages of the latents, shape: [batch_size] embeds (torch.Tensor): The embeddings of the prompts, shape: [2*batch_size or batch_size, ...] Note: the "or" is because if train_cfg is True, the expectation is that negative prompts are concatenated to the embeds Returns: loss (torch.Tensor), approx_kl (torch.Tensor), clipfrac (torch.Tensor) (all of these are of shape (1,)) """ with self.autocast(): if self.config.train_cfg: noise_pred = self.sd_pipeline.unet( torch.cat([latents] * 2), torch.cat([timesteps] * 2), embeds, ).sample noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + self.config.sample_guidance_scale * ( noise_pred_text - noise_pred_uncond ) else: noise_pred = self.sd_pipeline.unet( latents, timesteps, embeds, ).sample # compute the log prob of next_latents given latents under the current model scheduler_step_output = self.sd_pipeline.scheduler_step( noise_pred, timesteps, latents, eta=self.config.sample_eta, prev_sample=next_latents, ) log_prob = scheduler_step_output.log_probs advantages = torch.clamp( advantages, -self.config.train_adv_clip_max, self.config.train_adv_clip_max, ) ratio = torch.exp(log_prob - log_probs) loss = self.loss(advantages, self.config.train_clip_range, ratio) approx_kl = 0.5 * torch.mean((log_prob - log_probs) ** 2) clipfrac = torch.mean((torch.abs(ratio - 1.0) > self.config.train_clip_range).float()) return loss, approx_kl, clipfrac def loss( self, advantages: torch.Tensor, clip_range: float, ratio: torch.Tensor, ): unclipped_loss = -advantages * ratio clipped_loss = -advantages * torch.clamp( ratio, 1.0 - clip_range, 1.0 + clip_range, ) return torch.mean(torch.maximum(unclipped_loss, clipped_loss)) def _setup_optimizer(self, trainable_layers_parameters): if self.config.train_use_8bit_adam: import bitsandbytes optimizer_cls = bitsandbytes.optim.AdamW8bit else: optimizer_cls = torch.optim.AdamW return optimizer_cls( trainable_layers_parameters, lr=self.config.train_learning_rate, betas=(self.config.train_adam_beta1, self.config.train_adam_beta2), weight_decay=self.config.train_adam_weight_decay, eps=self.config.train_adam_epsilon, ) def _save_model_hook(self, models, weights, output_dir): self.sd_pipeline.save_checkpoint(models, weights, output_dir) weights.pop() # ensures that accelerate doesn't try to handle saving of the model def _load_model_hook(self, models, input_dir): self.sd_pipeline.load_checkpoint(models, input_dir) models.pop() # ensures that accelerate doesn't try to handle loading of the model def _generate_samples(self, iterations, batch_size): """ Generate samples from the model Args: iterations (int): Number of iterations to generate samples for batch_size (int): Batch size to use for sampling Returns: samples (list[dict[str, torch.Tensor]]), 
prompt_image_pairs (list[list[Any]]) """ samples = [] prompt_image_pairs = [] self.sd_pipeline.unet.eval() sample_neg_prompt_embeds = self.neg_prompt_embed.repeat(batch_size, 1, 1) for _ in range(iterations): prompts, prompt_metadata = zip(*[self.prompt_fn() for _ in range(batch_size)]) prompt_ids = self.sd_pipeline.tokenizer( prompts, return_tensors="pt", padding="max_length", truncation=True, max_length=self.sd_pipeline.tokenizer.model_max_length, ).input_ids.to(self.accelerator.device) prompt_embeds = self.sd_pipeline.text_encoder(prompt_ids)[0] with self.autocast(): sd_output = self.sd_pipeline( prompt_embeds=prompt_embeds, negative_prompt_embeds=sample_neg_prompt_embeds, num_inference_steps=self.config.sample_num_steps, guidance_scale=self.config.sample_guidance_scale, eta=self.config.sample_eta, output_type="pt", ) images = sd_output.images latents = sd_output.latents log_probs = sd_output.log_probs latents = torch.stack(latents, dim=1) # (batch_size, num_steps + 1, ...) log_probs = torch.stack(log_probs, dim=1) # (batch_size, num_steps, 1) timesteps = self.sd_pipeline.scheduler.timesteps.repeat(batch_size, 1) # (batch_size, num_steps) samples.append( { "prompt_ids": prompt_ids, "prompt_embeds": prompt_embeds, "timesteps": timesteps, "latents": latents[:, :-1], # each entry is the latent before timestep t "next_latents": latents[:, 1:], # each entry is the latent after timestep t "log_probs": log_probs, "negative_prompt_embeds": sample_neg_prompt_embeds, } ) prompt_image_pairs.append([images, prompts, prompt_metadata]) return samples, prompt_image_pairs def _train_batched_samples(self, inner_epoch, epoch, global_step, batched_samples): """ Train on a batch of samples. Main training segment Args: inner_epoch (int): The current inner epoch epoch (int): The current epoch global_step (int): The current global step batched_samples (list[dict[str, torch.Tensor]]): The batched samples to train on Side Effects: - Model weights are updated - Logs the statistics to the accelerator trackers. 
Returns: global_step (int): The updated global step """ info = defaultdict(list) for _i, sample in enumerate(batched_samples): if self.config.train_cfg: # concat negative prompts to sample prompts to avoid two forward passes embeds = torch.cat([sample["negative_prompt_embeds"], sample["prompt_embeds"]]) else: embeds = sample["prompt_embeds"] for j in range(self.num_train_timesteps): with self.accelerator.accumulate(self.sd_pipeline.unet): loss, approx_kl, clipfrac = self.calculate_loss( sample["latents"][:, j], sample["timesteps"][:, j], sample["next_latents"][:, j], sample["log_probs"][:, j], sample["advantages"], embeds, ) info["approx_kl"].append(approx_kl) info["clipfrac"].append(clipfrac) info["loss"].append(loss) self.accelerator.backward(loss) if self.accelerator.sync_gradients: self.accelerator.clip_grad_norm_( self.trainable_layers.parameters() if not isinstance(self.trainable_layers, list) else self.trainable_layers, self.config.train_max_grad_norm, ) self.optimizer.step() self.optimizer.zero_grad() # Checks if the accelerator has performed an optimization step behind the scenes if self.accelerator.sync_gradients: # log training-related stuff info = {k: torch.mean(torch.stack(v)) for k, v in info.items()} info = self.accelerator.reduce(info, reduction="mean") info.update({"epoch": epoch, "inner_epoch": inner_epoch}) self.accelerator.log(info, step=global_step) global_step += 1 info = defaultdict(list) return global_step def _config_check(self) -> tuple[bool, str]: samples_per_epoch = ( self.config.sample_batch_size * self.accelerator.num_processes * self.config.sample_num_batches_per_epoch ) total_train_batch_size = ( self.config.train_batch_size * self.accelerator.num_processes * self.config.train_gradient_accumulation_steps ) if not self.config.sample_batch_size >= self.config.train_batch_size: return ( False, f"Sample batch size ({self.config.sample_batch_size}) must be greater than or equal to the train batch size ({self.config.train_batch_size})", ) if not self.config.sample_batch_size % self.config.train_batch_size == 0: return ( False, f"Sample batch size ({self.config.sample_batch_size}) must be divisible by the train batch size ({self.config.train_batch_size})", ) if not samples_per_epoch % total_train_batch_size == 0: return ( False, f"Number of samples per epoch ({samples_per_epoch}) must be divisible by the total train batch size ({total_train_batch_size})", ) return True, "" def train(self, epochs: Optional[int] = None): """ Train the model for a given number of epochs """ global_step = 0 if epochs is None: epochs = self.config.num_epochs for epoch in range(self.first_epoch, epochs): global_step = self.step(epoch, global_step) def _save_pretrained(self, save_directory): self.sd_pipeline.save_pretrained(save_directory) self.create_model_card() def create_model_card( self, model_name: Optional[str] = None, dataset_name: Optional[str] = None, tags: Union[str, list[str], None] = None, ): """ Creates a draft of a model card using the information available to the `Trainer`. Args: model_name (`str` or `None`, *optional*, defaults to `None`): Name of the model. dataset_name (`str` or `None`, *optional*, defaults to `None`): Name of the dataset used for training. tags (`str`, `list[str]` or `None`, *optional*, defaults to `None`): Tags to be associated with the model card. 
""" if not self.is_world_process_zero(): return if hasattr(self.model.config, "_name_or_path") and not os.path.isdir(self.model.config._name_or_path): base_model = self.model.config._name_or_path else: base_model = None tags = tags or [] if isinstance(tags, str): tags = [tags] if hasattr(self.model.config, "unsloth_version"): tags.append("unsloth") citation = textwrap.dedent("""\ @inproceedings{black2024training, title = {{Training Diffusion Models with Reinforcement Learning}}, author = {Kevin Black and Michael Janner and Yilun Du and Ilya Kostrikov and Sergey Levine}, year = 2024, booktitle = {The Twelfth International Conference on Learning Representations, {ICLR} 2024, Vienna, Austria, May 7-11, 2024}, publisher = {OpenReview.net}, url = {https://openreview.net/forum?id=YCWjhGrJFD}, }""") model_card = generate_model_card( base_model=base_model, model_name=model_name, hub_model_id=self.hub_model_id, dataset_name=dataset_name, tags=tags, wandb_url=wandb.run.get_url() if is_wandb_available() and wandb.run is not None else None, comet_url=get_comet_experiment_url(), trainer_name="DDPO", trainer_citation=citation, paper_title="Training Diffusion Models with Reinforcement Learning", paper_id="2305.13301", ) model_card.save(os.path.join(self.args.output_dir, "README.md"))
trl/trl/trainer/ddpo_trainer.py/0
{ "file_path": "trl/trl/trainer/ddpo_trainer.py", "repo_id": "trl", "token_count": 12992 }
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from dataclasses import dataclass, field
from typing import Any, Optional

from transformers import TrainingArguments


@dataclass
class ORPOConfig(TrainingArguments):
    r"""
    Configuration class for the [`ORPOTrainer`].

    Using [`~transformers.HfArgumentParser`] we can turn this class into
    [argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the
    command line.

    Parameters:
        learning_rate (`float`, *optional*, defaults to `1e-6`):
            Initial learning rate for [`AdamW`] optimizer. The default value replaces that of
            [`~transformers.TrainingArguments`].
        max_length (`int` or `None`, *optional*, defaults to `1024`):
            Maximum length of the sequences (prompt + completion) in the batch. This argument is required if you want
            to use the default data collator.
        max_prompt_length (`int` or `None`, *optional*, defaults to `512`):
            Maximum length of the prompt. This argument is required if you want to use the default data collator.
        max_completion_length (`int` or `None`, *optional*, defaults to `None`):
            Maximum length of the completion. This argument is required if you want to use the default data collator
            and your model is an encoder-decoder.
        beta (`float`, *optional*, defaults to `0.1`):
            Parameter controlling the relative ratio loss weight in the ORPO loss. In the
            [paper](https://huggingface.co/papers/2403.07691), it is denoted by λ. In the
            [code](https://github.com/xfactlab/orpo), it is denoted by `alpha`.
        disable_dropout (`bool`, *optional*, defaults to `True`):
            Whether to disable dropout in the model.
        label_pad_token_id (`int`, *optional*, defaults to `-100`):
            Label pad token id. This argument is required if you want to use the default data collator.
        padding_value (`int` or `None`, *optional*, defaults to `None`):
            Padding value to use. If `None`, the padding value of the tokenizer is used.
        truncation_mode (`str`, *optional*, defaults to `"keep_end"`):
            Truncation mode to use when the prompt is too long. Possible values are `"keep_end"` or `"keep_start"`.
            This argument is required if you want to use the default data collator.
        generate_during_eval (`bool`, *optional*, defaults to `False`):
            If `True`, generates and logs completions from the model to W&B or Comet during evaluation.
        is_encoder_decoder (`bool` or `None`, *optional*, defaults to `None`):
            When using the `model_init` argument (callable) to instantiate the model instead of the `model` argument,
            you need to specify if the model returned by the callable is an encoder-decoder model.
        model_init_kwargs (`dict[str, Any]` or `None`, *optional*, defaults to `None`):
            Keyword arguments to pass to `AutoModelForCausalLM.from_pretrained` when instantiating the model from a
            string.
        dataset_num_proc (`int` or `None`, *optional*, defaults to `None`):
            Number of processes to use for processing the dataset.
    """

    learning_rate: float = field(
        default=1e-6,
        metadata={
            "help": "Initial learning rate for `AdamW` optimizer. The default value replaces that of "
            "transformers.TrainingArguments."
        },
    )
    max_length: Optional[int] = field(
        default=1024,
        metadata={"help": "Maximum length of the sequences (prompt + completion) in the batch."},
    )
    max_prompt_length: Optional[int] = field(
        default=512,
        metadata={
            "help": "Maximum length of the prompt. This argument is required if you want to use the default data "
            "collator and your model is an encoder-decoder."
        },
    )
    max_completion_length: Optional[int] = field(
        default=None,
        metadata={
            "help": "Maximum length of the completion. This argument is required if you want to use the default data "
            "collator and your model is an encoder-decoder."
        },
    )
    beta: float = field(
        default=0.1,
        metadata={
            "help": "Parameter controlling the relative ratio loss weight in the ORPO loss. In the paper, it is "
            "denoted by λ."
        },
    )
    disable_dropout: bool = field(
        default=True,
        metadata={"help": "Whether to disable dropout in the model."},
    )
    label_pad_token_id: int = field(
        default=-100,
        metadata={
            "help": "Label pad token id. This argument is required if you want to use the default data collator."
        },
    )
    padding_value: Optional[int] = field(
        default=None,
        metadata={"help": "Padding value to use. If `None`, the padding value of the tokenizer is used."},
    )
    truncation_mode: str = field(
        default="keep_end",
        metadata={
            "help": "Truncation mode to use when the prompt is too long.",
            "choices": ["keep_end", "keep_start"],
        },
    )
    generate_during_eval: bool = field(
        default=False,
        metadata={"help": "If `True`, generates and logs completions from the model to W&B during evaluation."},
    )
    is_encoder_decoder: Optional[bool] = field(
        default=None,
        metadata={
            "help": "When using the `model_init` argument (callable) to instantiate the model instead of the `model` "
            "argument, you need to specify if the model returned by the callable is an encoder-decoder model."
        },
    )
    model_init_kwargs: Optional[dict[str, Any]] = field(
        default=None,
        metadata={
            "help": "Keyword arguments to pass to `AutoModelForCausalLM.from_pretrained` when instantiating the model "
            "from a string."
        },
    )
    dataset_num_proc: Optional[int] = field(
        default=None,
        metadata={"help": "Number of processes to use for processing the dataset."},
    )
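# Illustrative usage sketch (not part of this module). It shows how the options documented above are
# typically passed to the trainer. The model/dataset identifiers and the `processing_class` keyword
# reflect current TRL conventions but are assumptions made for the sake of the example.
if __name__ == "__main__":
    from datasets import load_dataset
    from transformers import AutoModelForCausalLM, AutoTokenizer

    from trl import ORPOTrainer

    model_id = "trl-internal-testing/tiny-Qwen2ForCausalLM-2.5"
    model = AutoModelForCausalLM.from_pretrained(model_id)
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    train_dataset = load_dataset("trl-internal-testing/zen", "standard_preference", split="train")

    config = ORPOConfig(output_dir="orpo-example", beta=0.1, max_length=1024, per_device_train_batch_size=2)
    ORPOTrainer(model=model, args=config, train_dataset=train_dataset, processing_class=tokenizer).train()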
trl/trl/trainer/orpo_config.py/0
{ "file_path": "trl/trl/trainer/orpo_config.py", "repo_id": "trl", "token_count": 2486 }
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import threading import time import psutil import torch class PeakCPUMemory: def __init__(self): self.process = psutil.Process() self.peak_monitoring = False def peak_monitor(self): self.cpu_memory_peak = -1 while True: self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak) # can't sleep or will not catch the peak right (this comment is here on purpose) if not self.peak_monitoring: break def start(self): self.peak_monitoring = True self.thread = threading.Thread(target=self.peak_monitor) self.thread.daemon = True self.thread.start() def stop(self): self.peak_monitoring = False self.thread.join() return self.cpu_memory_peak cpu_peak_tracker = PeakCPUMemory() def start_measure(): # Time measures = {"time": time.time()} gc.collect() torch.cuda.empty_cache() # CPU mem measures["cpu"] = psutil.Process().memory_info().rss cpu_peak_tracker.start() # GPU mem for i in range(torch.cuda.device_count()): measures[str(i)] = torch.cuda.memory_allocated(i) torch.cuda.reset_peak_memory_stats() return measures def end_measure(start_measures): # Time measures = {"time": time.time() - start_measures["time"]} gc.collect() torch.cuda.empty_cache() # CPU mem measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20 measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20 # GPU mem for i in range(torch.cuda.device_count()): measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20 measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20 return measures def log_measures(measures, description): print(f"{description}:") print(f"- Time: {measures['time']:.2f}s") for i in range(torch.cuda.device_count()): print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB") peak = measures[f"{i}-peak"] print(f"- GPU {i} peak: {peak:.2f}MiB") print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB") print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
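# Illustrative usage sketch (not part of the utilities above). It shows how `start_measure`, `end_measure`
# and `log_measures` are meant to wrap a block of work; the matrix-multiply workload is a stand-in for
# real model inference, and a CUDA-capable environment is assumed for the per-GPU columns.
if __name__ == "__main__":
    start_measures = start_measure()

    # ... the code to profile goes here ...
    x = torch.randn(1024, 1024)
    for _ in range(10):
        x = x @ x.T / 1024

    measures = end_measure(start_measures)
    log_measures(measures, "Toy matmul workload")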
accelerate/benchmarks/big_model_inference/measures_util.py/0
{ "file_path": "accelerate/benchmarks/big_model_inference/measures_util.py", "repo_id": "accelerate", "token_count": 1146 }
<!--Copyright 2021 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Accelerate's internal mechanisms

Internally, Accelerate works by first analyzing the environment in which the script is launched to determine which
kind of distributed setup is used, how many different processes there are and which one the current script is in. All
that information is stored in the [`~AcceleratorState`].

This class is initialized the first time you instantiate an [`~Accelerator`] and also performs any specific
initialization your distributed setup needs. Its state is then uniquely shared through all instances of
[`~state.AcceleratorState`]. (The same can also be done with the [`PartialState`], a more bare-bones version that it
inherits from.)

Then, when calling [`~Accelerator.prepare`], the library:

- wraps your model(s) in the container adapted for the distributed setup,
- wraps your optimizer(s) in an [`~optimizer.AcceleratedOptimizer`],
- wraps your scheduler(s) in an [`~scheduler.AcceleratedScheduler`]
- creates a new version of your dataloader(s) in a [`~data_loader.DataLoaderShard`] or [`~data_loader.DataLoaderDispatcher`]

While the model(s), optimizer(s), and scheduler(s) are just put in simple wrappers, the dataloader(s) are re-created.
This is mostly because PyTorch does not let the user change the `batch_sampler` of a dataloader once it's been created,
and the library handles the sharding of your data between processes by changing that `batch_sampler` to yield every
other `num_processes` batches (if enabled).

The [`~data_loader.DataLoaderShard`] subclasses `DataLoader` to add the following functionality:

- it synchronizes the appropriate random number generator of all processes at each new iteration, to ensure any
  randomization (like shuffling) is done the exact same way across processes.
- it puts the batches on the proper device before yielding them (unless you have opted out of
  `device_placement=True`).

The [`~data_loader.DataLoaderDispatcher`] subclass differs from the [`~data_loader.DataLoaderShard`] in that, when
iterating through the `DataLoader`, all of the data starts on process 0 and is *then* split and sent to each process,
rather than this happening at the dataset level.

The random number generator synchronization will by default synchronize:

- the `generator` attribute of a given sampler (like the PyTorch `RandomSampler`) for PyTorch >= 1.6
- the main random number generator in PyTorch <=1.5.1

You can choose which random number generator(s) to synchronize with the `rng_types` argument of the main
[`Accelerator`]. In PyTorch >= 1.6, it is recommended to rely on a local `generator` to avoid setting the same seed in
the main random number generator in all processes.
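For example, here is a minimal sketch of the recommended pattern; the toy dataset, seed, and batch size are only
illustrative:

```python
import torch
from torch.utils.data import DataLoader, RandomSampler, TensorDataset

from accelerate import Accelerator

dataset = TensorDataset(torch.arange(64, dtype=torch.float32))

# Keep the shuffling randomness in a local generator instead of the global torch seed
generator = torch.Generator()
generator.manual_seed(42)
sampler = RandomSampler(dataset, generator=generator)
dataloader = DataLoader(dataset, batch_size=8, sampler=sampler)

# Only synchronize that local generator across processes
accelerator = Accelerator(rng_types=["generator"])
dataloader = accelerator.prepare(dataloader)
```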
<Tip warning={true}>

Synchronization of the main torch (or CUDA or XLA) random number generator will affect any other potential random
artifacts you could have in your dataset (like random data augmentation), in the sense that all processes will get the
same random numbers from the torch random modules (and so will apply the same random data augmentation if it's
controlled by torch).

</Tip>

<Tip>

The randomization part of your custom sampler, batch sampler or iterable dataset should be done using a local
`torch.Generator` object (in PyTorch >= 1.6); see the traditional `RandomSampler` as an example.

</Tip>

If you have [`torchdata>=0.8.0`](https://github.com/pytorch/data/tree/main) installed, and you have passed
`use_stateful_dataloader=True` into your [`~utils.DataLoaderConfiguration`], these classes will directly inherit from
`StatefulDataLoader` instead and maintain a `state_dict` (see the sketch below).

For more details about the internals, see the [Internals page](package_reference/torch_wrappers).
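As a closing illustration of the stateful-dataloader opt-in described above, here is a minimal sketch (the toy
dataloader is illustrative and `torchdata>=0.8.0` is assumed to be installed):

```python
import torch
from torch.utils.data import DataLoader, TensorDataset

from accelerate import Accelerator
from accelerate.utils import DataLoaderConfiguration

dataloader = DataLoader(TensorDataset(torch.arange(64, dtype=torch.float32)), batch_size=8)

# Opt in to torchdata's StatefulDataLoader so the prepared dataloader keeps a state_dict
dataloader_config = DataLoaderConfiguration(use_stateful_dataloader=True)
accelerator = Accelerator(dataloader_config=dataloader_config)
dataloader = accelerator.prepare(dataloader)

state = dataloader.state_dict()  # can be saved and later restored with dataloader.load_state_dict(state)
```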
accelerate/docs/source/concept_guides/internal_mechanism.md/0
{ "file_path": "accelerate/docs/source/concept_guides/internal_mechanism.md", "repo_id": "accelerate", "token_count": 1178 }
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Performing gradient accumulation with Accelerate Gradient accumulation is a technique where you can train on bigger batch sizes than your machine would normally be able to fit into memory. This is done by accumulating gradients over several batches, and only stepping the optimizer after a certain number of batches have been performed. While technically standard gradient accumulation code would work fine in a distributed setup, it is not the most efficient method for doing so and you may experience considerable slowdowns! In this tutorial you will see how to quickly setup gradient accumulation and perform it with the utilities provided in Accelerate, which can total to adding just one new line of code! This example will use a very simplistic PyTorch training loop that performs gradient accumulation every two batches: ```python device = "cuda" model.to(device) gradient_accumulation_steps = 2 for index, batch in enumerate(training_dataloader): inputs, targets = batch inputs = inputs.to(device) targets = targets.to(device) outputs = model(inputs) loss = loss_function(outputs, targets) loss = loss / gradient_accumulation_steps loss.backward() if (index + 1) % gradient_accumulation_steps == 0: optimizer.step() scheduler.step() optimizer.zero_grad() ``` ## Converting it to Accelerate First the code shown earlier will be converted to utilize Accelerate without the special gradient accumulation helper: ```diff + from accelerate import Accelerator + accelerator = Accelerator() + model, optimizer, training_dataloader, scheduler = accelerator.prepare( + model, optimizer, training_dataloader, scheduler + ) for index, batch in enumerate(training_dataloader): inputs, targets = batch - inputs = inputs.to(device) - targets = targets.to(device) outputs = model(inputs) loss = loss_function(outputs, targets) loss = loss / gradient_accumulation_steps + accelerator.backward(loss) if (index+1) % gradient_accumulation_steps == 0: optimizer.step() scheduler.step() optimizer.zero_grad() ``` <Tip warning={true}> In its current state, this code is not going to perform gradient accumulation efficiently due to a process called gradient synchronization. Read more about that in the [Concepts tutorial](../concept_guides/gradient_synchronization)! </Tip> ## Letting Accelerate handle gradient accumulation All that is left now is to let Accelerate handle the gradient accumulation for us. 
To do so you should pass in a `gradient_accumulation_steps` parameter to [`Accelerator`], dictating the number of steps to perform before each call to `step()` and how to automatically adjust the loss during the call to [`~Accelerator.backward`]: ```diff from accelerate import Accelerator - accelerator = Accelerator() + accelerator = Accelerator(gradient_accumulation_steps=2) ``` Alternatively, you can pass in a `gradient_accumulation_plugin` parameter to the [`Accelerator`] object's `__init__`, which will allow you to further customize the gradient accumulation behavior. Read more about that in the [GradientAccumulationPlugin](../package_reference/accelerator#accelerate.utils.GradientAccumulationPlugin) docs. From here you can use the [`~Accelerator.accumulate`] context manager from inside your training loop to automatically perform the gradient accumulation for you! You just wrap it around the entire training part of our code: ```diff - for index, batch in enumerate(training_dataloader): + for batch in training_dataloader: + with accelerator.accumulate(model): inputs, targets = batch outputs = model(inputs) ``` You can remove all the special checks for the step number and the loss adjustment: ```diff - loss = loss / gradient_accumulation_steps accelerator.backward(loss) - if (index+1) % gradient_accumulation_steps == 0: optimizer.step() scheduler.step() optimizer.zero_grad() ``` As you can see the [`Accelerator`] is able to keep track of the batch number you are on and it will automatically know whether to step through the prepared optimizer and how to adjust the loss. <Tip> Typically with gradient accumulation, you would need to adjust the number of steps to reflect the change in total batches you are training on. Accelerate automagically does this for you by default. Behind the scenes we instantiate a [`GradientAccumulationPlugin`] configured to do this. </Tip> <Tip warning={true}> The [`state.GradientState`] is sync'd with the active dataloader being iterated upon. As such it assumes naively that when we have reached the end of the dataloader everything will sync and a step will be performed. To disable this, set `sync_with_dataloader` to be `False` in the [`GradientAccumulationPlugin`]: ```{python} from accelerate import Accelerator from accelerate.utils import GradientAccumulationPlugin plugin = GradientAccumulationPlugin(sync_with_dataloader=False) accelerator = Accelerator(..., gradient_accumulation_plugin=plugin) ``` </Tip> ## The finished code Below is the finished implementation for performing gradient accumulation with Accelerate ```python from accelerate import Accelerator accelerator = Accelerator(gradient_accumulation_steps=2) model, optimizer, training_dataloader, scheduler = accelerator.prepare( model, optimizer, training_dataloader, scheduler ) for batch in training_dataloader: with accelerator.accumulate(model): inputs, targets = batch outputs = model(inputs) loss = loss_function(outputs, targets) accelerator.backward(loss) optimizer.step() scheduler.step() optimizer.zero_grad() ``` <Tip warning={true}> It's important that **only one forward/backward** should be done inside the context manager `with accelerator.accumulate(model)`. 
</Tip> To learn more about what magic this wraps around, read the [Gradient Synchronization concept guide](../concept_guides/gradient_synchronization) ## Self-contained example Here is a self-contained example that you can run to see gradient accumulation in action with Accelerate: ```python import torch import copy from accelerate import Accelerator from accelerate.utils import set_seed from torch.utils.data import TensorDataset, DataLoader # seed set_seed(0) # define toy inputs and labels x = torch.tensor([1., 2., 3., 4., 5., 6., 7., 8.]) y = torch.tensor([2., 4., 6., 8., 10., 12., 14., 16.]) gradient_accumulation_steps = 4 per_device_batch_size = len(x) // gradient_accumulation_steps # define dataset and dataloader dataset = TensorDataset(x, y) dataloader = DataLoader(dataset, batch_size=per_device_batch_size) # define model, optimizer and loss function class SimpleLinearModel(torch.nn.Module): def __init__(self): super(SimpleLinearModel, self).__init__() self.weight = torch.nn.Parameter(torch.zeros((1, 1))) def forward(self, inputs): return inputs @ self.weight model = SimpleLinearModel() model_clone = copy.deepcopy(model) criterion = torch.nn.MSELoss() model_optimizer = torch.optim.SGD(model.parameters(), lr=0.02) accelerator = Accelerator(gradient_accumulation_steps=gradient_accumulation_steps) model, model_optimizer, dataloader = accelerator.prepare(model, model_optimizer, dataloader) model_clone_optimizer = torch.optim.SGD(model_clone.parameters(), lr=0.02) print(f"initial model weight is {model.weight.mean().item():.5f}") print(f"initial model weight is {model_clone.weight.mean().item():.5f}") for i, (inputs, labels) in enumerate(dataloader): with accelerator.accumulate(model): inputs = inputs.view(-1, 1) print(i, inputs.flatten()) labels = labels.view(-1, 1) outputs = model(inputs) loss = criterion(outputs, labels) accelerator.backward(loss) model_optimizer.step() model_optimizer.zero_grad() loss = criterion(x.view(-1, 1) @ model_clone.weight, y.view(-1, 1)) model_clone_optimizer.zero_grad() loss.backward() model_clone_optimizer.step() print(f"w/ accumulation, the final model weight is {model.weight.mean().item():.5f}") print(f"w/o accumulation, the final model weight is {model_clone.weight.mean().item():.5f}") ``` ``` initial model weight is 0.00000 initial model weight is 0.00000 0 tensor([1., 2.]) 1 tensor([3., 4.]) 2 tensor([5., 6.]) 3 tensor([7., 8.]) w/ accumulation, the final model weight is 2.04000 w/o accumulation, the final model weight is 2.04000 ``` ## Gradient accumulation on training samples of variable size As was pointed out in this [blog-post](https://huggingface.co/blog/gradient_accumulation), which points out a common error that occurs when performing gradient accumulation on training samples of variable size: > [...] for gradient accumulation across token-level tasks like causal LM training, the correct loss should be computed by the **total loss across all batches in a gradient accumulation step** divided by the **total number of all non padding tokens in those batches**. This is not the same as the average of the per-batch loss values. In other words, some adjustements must be made on losses that operate on a token-level basis. 
### Skeleton code

```python
from accelerate import Accelerator
import math
import contextlib

gradient_accumulation_steps = 2
accelerator = Accelerator(gradient_accumulation_steps=gradient_accumulation_steps)
model, optimizer, training_dataloader, scheduler = accelerator.prepare(
    model, optimizer, training_dataloader, scheduler
)

training_iterator = iter(training_dataloader)
num_samples_in_epoch = len(training_dataloader)
remainder = num_samples_in_epoch % gradient_accumulation_steps
remainder = remainder if remainder != 0 else gradient_accumulation_steps
total_updates = math.ceil(num_samples_in_epoch / gradient_accumulation_steps)

total_batched_samples = 0
for update_step in range(total_updates):
    # In order to correctly compute the total number of non-padded tokens on which we'll compute the cross-entropy loss
    # we need to pre-load the full local batch - i.e. the next per_device_batch_size * accumulation_steps samples
    batch_samples = []
    num_batches_in_step = gradient_accumulation_steps if update_step != (total_updates - 1) else remainder
    for _ in range(num_batches_in_step):
        batch_samples += [next(training_iterator)]

    # get local num items in batch
    num_items_in_batch = sum([(batch["labels"].ne(-100)).sum() for batch in batch_samples])
    # to compute it correctly in a multi-device DDP training, we need to gather the total number of items in the full batch.
    num_items_in_batch = accelerator.gather(num_items_in_batch).sum().item()

    for i, batch in enumerate(batch_samples):
        # if we perform gradient accumulation in a multi-device set-up, we want to avoid unnecessary communications when accumulating
        # cf: https://muellerzr.github.io/blog/gradient_accumulation.html
        if (i < len(batch_samples) - 1 and accelerator.num_processes > 1):
            ctx = model.no_sync
        else:
            ctx = contextlib.nullcontext

        total_batched_samples += 1

        with ctx():
            inputs, targets = batch
            outputs = model(inputs)
            loss = loss_function(outputs, targets)  # the loss function should sum over samples rather than averaging

            # We multiply by num_processes because the DDP calculates the average gradient across all devices whereas dividing by num_items_in_batch already takes into account all devices
            # Same reason for gradient_accumulation_steps, but this time it's Accelerate that calculates the average gradient across the accumulated steps
            loss = (loss * gradient_accumulation_steps * accelerator.num_processes) / num_items_in_batch

            accelerator.backward(loss)

    # Sync gradients and perform optimization steps once every gradient_accumulation_steps
    optimizer.step()
    scheduler.step()
    optimizer.zero_grad()
```

### Self-contained causal LM example

```py
import torch
import copy
from accelerate import Accelerator
from accelerate.utils import set_seed
from accelerate.logging import get_logger
from torch.utils.data import Dataset, DataLoader
import math
import contextlib

# seed
set_seed(0)
logger = get_logger(__name__)


class MyDataset(Dataset):
    def __init__(self, num_samples):
        super().__init__()
        self.len = num_samples

    def __getitem__(self, index):
        input_ids = torch.arange(1, index+2, dtype=torch.float32)
        labels = torch.remainder(input_ids, 2)
        return {"input_ids": input_ids, "labels": labels}

    def __len__(self):
        return self.len


def collate_fn(features):
    input_ids = torch.nn.utils.rnn.pad_sequence([f["input_ids"] for f in features], batch_first=True, padding_value=-100)
    labels = torch.nn.utils.rnn.pad_sequence([f["labels"] for f in features], batch_first=True, padding_value=-100)
    return {"input_ids": input_ids[..., None], "labels": labels[..., None]}


# define toy inputs and labels
gradient_accumulation_steps = 2
per_device_batch_size = 4

# define accelerator
accelerator = Accelerator(gradient_accumulation_steps=gradient_accumulation_steps)

# define dataset and dataloader
# for this toy example, we'll compute gradient descent over one single global batch
dataset = MyDataset(per_device_batch_size*gradient_accumulation_steps*accelerator.num_processes)
dataloader = DataLoader(dataset, batch_size=per_device_batch_size, collate_fn=collate_fn)

# define model, model_optimizer and loss function
model = torch.nn.Linear(1, 2, bias=False)
model_clone = copy.deepcopy(model)
criterion = torch.nn.CrossEntropyLoss(reduction="sum")  # must sum over samples rather than averaging
model_optimizer = torch.optim.SGD(model.parameters(), lr=0.08)

logger.warning(f"initial model weight is {model.weight.detach().cpu().squeeze()}")
logger.warning(f"initial model clone weight is {model_clone.weight.detach().cpu().squeeze()}")

# prepare artifacts - accelerator handles device placement and dataloader splitting
model, model_optimizer = accelerator.prepare(model, model_optimizer)
dataloader = accelerator.prepare_data_loader(dataloader, device_placement=True)
training_iterator = iter(dataloader)

num_samples_in_epoch = len(dataloader)
remainder = num_samples_in_epoch % gradient_accumulation_steps
remainder = remainder if remainder != 0 else gradient_accumulation_steps
total_gradient_updates = math.ceil(num_samples_in_epoch / gradient_accumulation_steps)

total_batched_samples = 0
for update_step in range(total_gradient_updates):
    # In order to correctly compute the total number of non-padded tokens on which we'll compute the cross-entropy loss
    # we need to pre-load the full local batch - i.e. the next per_device_batch_size * accumulation_steps samples
    batch_samples = []
    num_batches_in_step = gradient_accumulation_steps if update_step != (total_gradient_updates - 1) else remainder
    for _ in range(num_batches_in_step):
        batch_samples += [next(training_iterator)]

    # get local num items in batch
    local_num_items_in_batch = sum([(batch["labels"].ne(-100)).sum() for batch in batch_samples])
    logger.warning(f"Step {update_step} - Device {accelerator.process_index} - num items in the local batch {local_num_items_in_batch}", main_process_only=False)

    # to compute it correctly in a multi-device DDP training, we need to gather the total number of items in the full batch.
    num_items_in_batch = accelerator.gather(local_num_items_in_batch).sum().item()
    logger.warning(f"Total num items {num_items_in_batch}")

    for i, batch in enumerate(batch_samples):
        inputs, labels = batch["input_ids"], batch["labels"]
        total_batched_samples += 1
        # if we perform gradient accumulation in a multi-device set-up, we want to avoid unnecessary communications when accumulating
        # cf: https://muellerzr.github.io/blog/gradient_accumulation.html
        if (i < len(batch_samples) - 1 and accelerator.num_processes > 1):
            ctx = model.no_sync
        else:
            ctx = contextlib.nullcontext

        with ctx():
            outputs = model(inputs)
            loss = criterion(outputs.view(-1, 2), labels.view(-1).to(torch.int64))

            # We multiply by num_processes because the DDP calculates the average gradient across all devices whereas dividing by num_items_in_batch already takes into account all devices
            # Same reason for gradient_accumulation_steps, but this time it's Accelerate that calculates the average gradient across the accumulated steps
            loss = (loss * gradient_accumulation_steps * accelerator.num_processes) / num_items_in_batch

            accelerator.backward(loss)
    model_optimizer.step()
    model_optimizer.zero_grad()


logger.warning(f"Device {accelerator.process_index} - w/ accumulation, the final model weight is {accelerator.unwrap_model(model).weight.detach().cpu().squeeze()}", main_process_only=False)

# We now do the same operation but on a single device and without gradient accumulation

if accelerator.is_main_process:
    # prepare one single entire batch
    dataloader = DataLoader(dataset, batch_size=len(dataset), collate_fn=collate_fn)
    full_batch_without_accum = next(iter(dataloader))
    total_inputs, total_labels = full_batch_without_accum["input_ids"], full_batch_without_accum["labels"]
    model_clone_optimizer = torch.optim.SGD(model_clone.parameters(), lr=0.08)

    # train the cloned model
    loss = torch.nn.CrossEntropyLoss(reduction="mean")(model_clone(total_inputs).view(-1, 2), total_labels.view(-1).to(torch.int64))
    model_clone_optimizer.zero_grad()
    loss.backward()
    model_clone_optimizer.step()

    # We should have the same final weights.
    logger.warning(f"w/o accumulation, the final model weight is {model_clone.weight.detach().cpu().squeeze()}")
```

Results on a single device - gradient accumulation steps set to 1 and batch_size set to 8:
```
initial model weight is tensor([-0.0075, 0.5364])
initial model clone weight is tensor([-0.0075, 0.5364])
Step 0 - Device 0 - num items in the local batch 36
Total num items 36
Device 0 - w/ accumulation, the final model weight is tensor([0.0953, 0.4337])
w/o accumulation, the final model weight is tensor([0.0953, 0.4337])
```

Results on a two-device set-up - gradient accumulation steps set to 2 and batch_size set to 4.
```
initial model weight is tensor([-0.0075, 0.5364])
initial model clone weight is tensor([-0.0075, 0.5364])
Step 0 - Device 0 - num items in the local batch 52
Step 0 - Device 1 - num items in the local batch 84
Total num items 136
Device 1 - w/ accumulation, the final model weight is tensor([0.2117, 0.3172])
Device 0 - w/ accumulation, the final model weight is tensor([0.2117, 0.3172])
w/o accumulation, the final model weight is tensor([0.2117, 0.3172])
```

### To go further:

Please find a complete example script on a real-world training run in the examples folder at the path [`accelerate/examples/by_feature/gradient_accumulation_for_autoregressive_models.py`](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/gradient_accumulation_for_autoregressive_models.py).
Running it on several training configurations with constant global batch size equal to 32 gives the following graph: <div style="text-align: center"> <img src="https://huggingface.co/datasets/hf-audio/gradient_accumulation_example/resolve/main/training_losses.png"> </div> Note that the training losses are exactly the same up to training step 20. The small deviation after this training step occurs at the very end of the first epoch, because, by [default](https://huggingface.co/docs/accelerate/en/package_reference/torch_wrappers#accelerate.data_loader.prepare_data_loader.even_batches), the dataloader duplicates the samples at the beginning of the dataset when the total batch size doesn't exactly divide the dataset.
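If those duplicated samples are undesirable for your use case, the `even_batches` behaviour linked above can be disabled. Below is a minimal sketch that reuses the `dataset`, `collate_fn` and `per_device_batch_size` from the example above and assumes the dataloader is built through `accelerate.data_loader.prepare_data_loader` (the function on which the `even_batches` default is documented):

```python
from torch.utils.data import DataLoader
from accelerate.data_loader import prepare_data_loader

# With even_batches=False, the final smaller batch is kept as-is instead of being
# topped up with samples duplicated from the beginning of the dataset.
dataloader = prepare_data_loader(
    DataLoader(dataset, batch_size=per_device_batch_size, collate_fn=collate_fn),
    even_batches=False,
)
```

Keep in mind that processes may then receive a different number of batches at the end of an epoch, which is exactly what the default setting is there to avoid.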
accelerate/docs/source/usage_guides/gradient_accumulation.md/0
{ "file_path": "accelerate/docs/source/usage_guides/gradient_accumulation.md", "repo_id": "accelerate", "token_count": 6972 }
# Config Zoo

This folder contains a variety of minimal configurations for `Accelerate` achieving certain goals. You can use these config YAMLs directly, or build off of them for your own YAMLs.

These are highly annotated versions, aiming to teach you what each section does.

Each config can be run via `accelerate launch --config_file {file} run_me.py`

`run_me.py` will then print out how the current environment is set up (the contents of the `AcceleratorState`)
accelerate/examples/config_yaml_templates/README.md/0
{ "file_path": "accelerate/examples/config_yaml_templates/README.md", "repo_id": "accelerate", "token_count": 124 }
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import pathlib import queue from concurrent.futures import ThreadPoolExecutor from typing import Union import fire import scipy.io.wavfile import torch from datasets import load_dataset from transformers import AutoTokenizer, VitsModel from accelerate import PartialState from accelerate.utils import tqdm """ Requirements: transformers accelerate fire scipy datasets pip install transformers accelerate fire scipy datasets Example usage: accelerate launch distributed_speech_generation.py --output_path outputs --batch_size 8 --num_workers 2 --dataset_split train """ """ To run the speech generation import scipy.io.wavfile import numpy as np from IPython.display import Audio sample_rate, audio_data = scipy.io.wavfile.read('path_to_you_wav_file.wav') audio_data = audio_data.astype(np.float32) / 32762.0 Audio(audio_data, rate=sample_rate) """ def load_pokemon_data(split: str, max_text_length: int): """Load Pokemon descriptions from the dataset""" ds = load_dataset("svjack/pokemon-blip-captions-en-zh", split=split) # Create dataset of dictionaries dataset = [] for idx, text in enumerate(ds["en_text"]): if len(text.strip()) > 0: # Skip empty descriptions dataset.append( { "id": f"pokemon_{idx:06d}", "text": text.strip()[:max_text_length], # Truncate long descriptions "original_text": text.strip(), # Keep original for metadata } ) return dataset class ExistsFilter: def __init__(self, output_dir: Union[pathlib.Path, str]): current_files = [f.split(".wav")[0] for f in os.listdir(output_dir) if f.endswith(".wav")] self.processed_files = set(current_files) print(f"Existing audio files found: {len(self.processed_files)}.") def __call__(self, x): return x["id"] not in self.processed_files def preprocess_fn(sample, tokenizer, max_text_length: int): inputs = tokenizer(sample["text"], padding=False, truncation=True, max_length=max_text_length, return_tensors="pt") return { "input_ids": inputs["input_ids"][0].tolist(), "attention_mask": inputs["attention_mask"][0].tolist(), "id": sample["id"], "text": sample["text"], "original_text": sample["original_text"], } def collate_fn(examples, tokenizer): """Collate batch of examples with proper padding""" # Find max length in this batch max_length = max(len(example["input_ids"]) for example in examples) # Pad sequences to max_length input_ids_list = [] attention_mask_list = [] for example in examples: # Get current lengths curr_len = len(example["input_ids"]) padding_length = max_length - curr_len # Pad sequences padded_input_ids = example["input_ids"] + [tokenizer.pad_token_id] * padding_length padded_attention_mask = example["attention_mask"] + [0] * padding_length input_ids_list.append(padded_input_ids) attention_mask_list.append(padded_attention_mask) # Convert to tensors input_ids = torch.tensor(input_ids_list, dtype=torch.long) attention_mask = torch.tensor(attention_mask_list, dtype=torch.long) ids = [example["id"] for example in examples] 
texts = [example["text"] for example in examples] original_texts = [example["original_text"] for example in examples] return { "input_ids": input_ids, "attention_mask": attention_mask, "ids": ids, "texts": texts, "original_texts": original_texts, } def create_dataloader(dataset, batch_size, distributed_state, tokenizer): """Create dataloader with preprocessing""" processed_dataset = [preprocess_fn(item, tokenizer, max_text_length=200) for item in dataset] # Split dataset for distributed processing if distributed_state.num_processes > 1: chunk_size = len(processed_dataset) // distributed_state.num_processes start_idx = distributed_state.process_index * chunk_size end_idx = ( start_idx + chunk_size if distributed_state.process_index < distributed_state.num_processes - 1 else len(processed_dataset) ) processed_dataset = processed_dataset[start_idx:end_idx] # Create batches batches = [] for i in range(0, len(processed_dataset), batch_size): batch = processed_dataset[i : i + batch_size] batches.append(collate_fn(batch, tokenizer)) return batches def save_results(output_queue: queue.Queue, output_dir: pathlib.Path, sampling_rate: int): while True: try: item = output_queue.get(timeout=5) if item is None: break waveforms, ids, texts, original_texts = item # Save each audio file and its metadata for waveform, file_id, text, original_text in zip(waveforms, ids, texts, original_texts): # Save audio wav_path = output_dir / f"{file_id}.wav" scipy.io.wavfile.write(wav_path, rate=sampling_rate, data=waveform.cpu().float().numpy()) # Save metadata with both truncated and original text metadata = { "text_used": text, "original_text": original_text, "model": "facebook/mms-tts-eng", "sampling_rate": sampling_rate, } metadata_path = output_dir / f"{file_id}_metadata.json" with metadata_path.open("w") as f: json.dump(metadata, f, indent=4) except queue.Empty: continue def main( output_path: str = "speech_data", batch_size: int = 8, num_workers: int = 2, dataset_split: str = "train", model_name: str = "facebook/mms-tts-eng", max_text_length: int = 200, ): output_dir = pathlib.Path(output_path) output_dir.mkdir(parents=True, exist_ok=True) distributed_state = PartialState() # Load model and tokenizer model = VitsModel.from_pretrained( model_name, device_map=distributed_state.device, torch_dtype=torch.float32, ) tokenizer = AutoTokenizer.from_pretrained(model_name) # Load and filter data dataset = load_pokemon_data(dataset_split, max_text_length) exist_filter = ExistsFilter(output_dir) dataset = [item for item in dataset if exist_filter(item)] distributed_state.print(f"Processing {len(dataset)} Pokemon descriptions") # Create dataloader batches = create_dataloader(dataset, batch_size, distributed_state, tokenizer) # Setup output queue and save thread output_queue = queue.Queue() save_thread = ThreadPoolExecutor(max_workers=num_workers) save_future = save_thread.submit(save_results, output_queue, output_dir, model.config.sampling_rate) try: for batch in tqdm(batches, desc="Generating Pokemon descriptions"): with torch.no_grad(): outputs = model( input_ids=batch["input_ids"].to(distributed_state.device, dtype=torch.long), attention_mask=batch["attention_mask"].to(distributed_state.device, dtype=torch.long), ).waveform output_queue.put((outputs, batch["ids"], batch["texts"], batch["original_texts"])) finally: output_queue.put(None) save_thread.shutdown(wait=True) save_future.result() if __name__ == "__main__": fire.Fire(main)
accelerate/examples/inference/distributed/distributed_speech_generation.py/0
{ "file_path": "accelerate/examples/inference/distributed/distributed_speech_generation.py", "repo_id": "accelerate", "token_count": 3272 }
#!/bin/bash

#SBATCH --job-name=multinode
#SBATCH -D .
#SBATCH --output=O-%x.%j
#SBATCH --error=E-%x.%j
#SBATCH --nodes=4                   # number of nodes
#SBATCH --ntasks-per-node=1         # number of MP tasks
#SBATCH --gres=gpu:4                # number of GPUs per node
#SBATCH --cpus-per-task=160         # number of cores per tasks
#SBATCH --time=01:59:00             # maximum execution time (HH:MM:SS)

######################
### Set environment ##
######################
source activateEnvironment.sh
export GPUS_PER_NODE=4
######################

######################
#### Set network #####
######################
head_node_ip=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
######################

export LAUNCHER="accelerate launch \
    --num_processes $((SLURM_NNODES * GPUS_PER_NODE)) \
    --num_machines $SLURM_NNODES \
    --rdzv_backend c10d \
    --main_process_ip $head_node_ip \
    --main_process_port 29500 \
    "

export ACCELERATE_DIR="${ACCELERATE_DIR:-/accelerate}"
export SCRIPT="${ACCELERATE_DIR}/examples/complete_nlp_example.py"
export SCRIPT_ARGS=" \
    --mixed_precision fp16 \
    --output_dir ${ACCELERATE_DIR}/examples/output \
    "

# This step is necessary because accelerate launch does not handle multiline arguments properly
export CMD="$LAUNCHER $SCRIPT $SCRIPT_ARGS"
srun $CMD
accelerate/examples/slurm/submit_multinode.sh/0
{ "file_path": "accelerate/examples/slurm/submit_multinode.sh", "repo_id": "accelerate", "token_count": 547 }
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from setuptools import find_packages, setup extras = {} extras["quality"] = [ "black ~= 23.1", # hf-doc-builder has a hidden dependency on `black` "hf-doc-builder >= 0.3.0", "ruff ~= 0.6.4", ] extras["docs"] = [] extras["test_prod"] = ["pytest>=7.2.0,<=8.0.0", "pytest-xdist", "pytest-subtests", "parameterized"] extras["test_dev"] = [ "datasets", "diffusers", "evaluate", "torchdata>=0.8.0", "torchpippy>=0.2.0", "transformers", "scipy", "scikit-learn", "tqdm", "bitsandbytes", "timm", ] extras["testing"] = extras["test_prod"] + extras["test_dev"] extras["deepspeed"] = ["deepspeed"] extras["rich"] = ["rich"] extras["test_trackers"] = ["wandb", "comet-ml", "tensorboard", "dvclive"] extras["dev"] = extras["quality"] + extras["testing"] + extras["rich"] extras["sagemaker"] = [ "sagemaker", # boto3 is a required package in sagemaker ] setup( name="accelerate", version="1.4.0.dev0", description="Accelerate", long_description=open("README.md", encoding="utf-8").read(), long_description_content_type="text/markdown", keywords="deep learning", license="Apache", author="The HuggingFace team", author_email="[email protected]", url="https://github.com/huggingface/accelerate", package_dir={"": "src"}, packages=find_packages("src"), entry_points={ "console_scripts": [ "accelerate=accelerate.commands.accelerate_cli:main", "accelerate-config=accelerate.commands.config:main", "accelerate-estimate-memory=accelerate.commands.estimate:main", "accelerate-launch=accelerate.commands.launch:main", "accelerate-merge-weights=accelerate.commands.merge:main", ] }, python_requires=">=3.9.0", install_requires=[ "numpy>=1.17,<3.0.0", "packaging>=20.0", "psutil", "pyyaml", "torch>=2.0.0", "huggingface_hub>=0.21.0", "safetensors>=0.4.3", ], extras_require=extras, classifiers=[ "Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "Intended Audience :: Education", "Intended Audience :: Science/Research", "License :: OSI Approved :: Apache Software License", "Operating System :: OS Independent", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.8", "Topic :: Scientific/Engineering :: Artificial Intelligence", ], ) # Release checklist # 1. Checkout the release branch (for a patch the current release branch, for a new minor version, create one): # git checkout -b vXX.xx-release # The -b is only necessary for creation (so remove it when doing a patch) # 2. Change the version in __init__.py and setup.py to the proper value. # 3. Commit these changes with the message: "Release: v<VERSION>" # 4. Add a tag in git to mark the release: # git tag v<VERSION> -m 'Adds tag v<VERSION> for pypi' # Push the tag and release commit to git: git push --tags origin vXX.xx-release # 5. Run the following commands in the top-level directory: # make prepare_release # 6. Upload the package to the pypi test server first: # make target=testpypi upload_release # 7. 
Check that you can install it in a virtualenv by running: # make install_test_release # accelerate env # accelerate test # 8. Upload the final version to actual pypi: # make target=pypi upload_release # 9. Add release notes to the tag in github once everything is looking hunky-dory. # 10. Go back to the main branch and update the version in __init__.py, setup.py to the new version ".dev" and push to # main.
accelerate/setup.py/0
{ "file_path": "accelerate/setup.py", "repo_id": "accelerate", "token_count": 1646 }
#!/usr/bin/env python # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from huggingface_hub import model_info from huggingface_hub.utils import GatedRepoError, RepositoryNotFoundError from accelerate import init_empty_weights from accelerate.commands.utils import CustomArgumentParser from accelerate.utils import ( calculate_maximum_sizes, convert_bytes, is_timm_available, is_transformers_available, ) if is_transformers_available(): import transformers from transformers import AutoConfig, AutoModel if is_timm_available(): import timm def verify_on_hub(repo: str, token: str = None): "Verifies that the model is on the hub and returns the model info." try: return model_info(repo, token=token) except (OSError, GatedRepoError): return "gated" except RepositoryNotFoundError: return "repo" def check_has_model(error): """ Checks what library spawned `error` when a model is not found """ if is_timm_available() and isinstance(error, RuntimeError) and "Unknown model" in error.args[0]: return "timm" elif ( is_transformers_available() and isinstance(error, OSError) and "does not appear to have a file named" in error.args[0] ): return "transformers" else: return "unknown" def create_empty_model(model_name: str, library_name: str, trust_remote_code: bool = False, access_token: str = None): """ Creates an empty model in full precision from its parent library on the `Hub` to calculate the overall memory consumption. Args: model_name (`str`): The model name on the Hub library_name (`str`): The library the model has an integration with, such as `transformers`. Will be used if `model_name` has no metadata on the Hub to determine the library. trust_remote_code (`bool`, `optional`, defaults to `False`): Whether or not to allow for custom models defined on the Hub in their own modeling files. This option should only be set to `True` for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine. access_token (`str`, `optional`, defaults to `None`): The access token to use to access private or gated models on the Hub. (for use on the Gradio app) Returns: `torch.nn.Module`: The torch model that has been initialized on the `meta` device. """ model_info = verify_on_hub(model_name, access_token) # Simplified errors if model_info == "gated": raise GatedRepoError( f"Repo for model `{model_name}` is gated. You must be authenticated to access it. Please run `huggingface-cli login`." ) elif model_info == "repo": raise RepositoryNotFoundError( f"Repo for model `{model_name}` does not exist on the Hub. If you are trying to access a private repo," " make sure you are authenticated via `huggingface-cli login` and have access." 
) if library_name is None: library_name = getattr(model_info, "library_name", False) if not library_name: raise ValueError( f"Model `{model_name}` does not have any library metadata on the Hub, please manually pass in a `--library_name` to use (such as `transformers`)" ) if library_name == "transformers": if not is_transformers_available(): raise ImportError( f"To check `{model_name}`, `transformers` must be installed. Please install it via `pip install transformers`" ) print(f"Loading pretrained config for `{model_name}` from `transformers`...") if model_info.config is None: raise RuntimeError(f"Tried to load `{model_name}` with `transformers` but it does not have any metadata.") auto_map = model_info.config.get("auto_map", False) config = AutoConfig.from_pretrained(model_name, trust_remote_code=trust_remote_code, token=access_token) with init_empty_weights(): # remote code could specify a specific `AutoModel` class in the `auto_map` constructor = AutoModel if isinstance(auto_map, dict): value = None for key in auto_map.keys(): if key.startswith("AutoModelFor"): value = key break if value is not None: constructor = getattr(transformers, value) # we need to pass the dtype, otherwise it is going to use the torch_dtype that is saved in the config model = constructor.from_config(config, torch_dtype=torch.float32, trust_remote_code=trust_remote_code) elif library_name == "timm": if not is_timm_available(): raise ImportError( f"To check `{model_name}`, `timm` must be installed. Please install it via `pip install timm`" ) print(f"Loading pretrained config for `{model_name}` from `timm`...") with init_empty_weights(): model = timm.create_model(model_name, pretrained=False) else: raise ValueError( f"Library `{library_name}` is not supported yet, please open an issue on GitHub for us to add support." ) return model def create_ascii_table(headers: list, rows: list, title: str): "Creates a pretty table from a list of rows, minimal version of `tabulate`." 
sep_char, in_between = "β”‚", "─" column_widths = [] for i in range(len(headers)): column_values = [row[i] for row in rows] + [headers[i]] max_column_width = max(len(value) for value in column_values) column_widths.append(max_column_width) formats = [f"%{column_widths[i]}s" for i in range(len(rows[0]))] pattern = f"{sep_char}{sep_char.join(formats)}{sep_char}" diff = 0 def make_row(left_char, middle_char, right_char): return f"{left_char}{middle_char.join([in_between * n for n in column_widths])}{in_between * diff}{right_char}" separator = make_row("β”œ", "β”Ό", "─") if len(title) > sum(column_widths): diff = abs(len(title) - len(separator)) column_widths[-1] += diff # Update with diff separator = make_row("β”œ", "β”Ό", "─") initial_rows = [ make_row("β”Œ", in_between, "┐"), f"{sep_char}{title.center(len(separator) - 2)}{sep_char}", make_row("β”œ", "┬", "─"), ] table = "\n".join(initial_rows) + "\n" column_widths[-1] += diff centered_line = [text.center(column_widths[i]) for i, text in enumerate(headers)] table += f"{pattern % tuple(centered_line)}\n{separator}\n" for i, line in enumerate(rows): centered_line = [t.center(column_widths[i]) for i, t in enumerate(line)] table += f"{pattern % tuple(centered_line)}\n" table += f'β””{"β”΄".join([in_between * n for n in column_widths])}β”˜' return table def estimate_command_parser(subparsers=None): if subparsers is not None: parser = subparsers.add_parser("estimate-memory") else: parser = CustomArgumentParser(description="Model size estimator for fitting a model onto CUDA memory.") parser.add_argument("model_name", type=str, help="The model name on the Hugging Face Hub.") parser.add_argument( "--library_name", type=str, help="The library the model has an integration with, such as `transformers`, needed only if this information is not stored on the Hub.", choices=["timm", "transformers"], ) parser.add_argument( "--dtypes", type=str, nargs="+", default=["float32", "float16", "int8", "int4"], help="The dtypes to use for the model, must be one (or many) of `float32`, `float16`, `int8`, and `int4`", choices=["float32", "float16", "int8", "int4"], ) parser.add_argument( "--trust_remote_code", action="store_true", help="""Whether or not to allow for custom models defined on the Hub in their own modeling files. This flag should only be used for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.""", default=False, ) if subparsers is not None: parser.set_defaults(func=estimate_command) return parser def estimate_training_usage(bytes: int, mixed_precision: str, msamp_config: str = None) -> dict: """ Given an amount of `bytes` and `mixed_precision`, calculates how much training memory is needed for a batch size of 1. Args: bytes (`int`): The size of the model being trained. mixed_precision (`str`): The mixed precision that would be ran. msamp_config (`str`): The msamp config to estimate the training memory for if `mixed_precision` is set to `"fp8"`. 
""" memory_sizes = {"model": -1, "optimizer": -1, "gradients": -1, "step": -1} fp32_size = bytes fp16_size = bytes // 2 if mixed_precision == "float32": memory_sizes["model"] = fp32_size memory_sizes["gradients"] = fp32_size memory_sizes["optimizer"] = fp32_size * 2 memory_sizes["step"] = fp32_size * 4 elif mixed_precision in ("float16", "bfloat16") or (mixed_precision == "fp8" and msamp_config is None): # With native `TransformersEngine`, there is no memory savings with FP8 # With mixed precision training, the model has weights stored # in FP16 and FP32 memory_sizes["model"] = fp32_size # 1.5 from weight gradient + computation (GEMM) memory_sizes["gradients"] = fp32_size + fp16_size # 2x from optimizer states memory_sizes["optimizer"] = fp32_size * 2 # Optimizer states memory_sizes["step"] = memory_sizes["optimizer"] return memory_sizes def gather_data(args): "Creates an empty model and gathers the data for the sizes" try: model = create_empty_model( args.model_name, library_name=args.library_name, trust_remote_code=args.trust_remote_code ) except (RuntimeError, OSError) as e: library = check_has_model(e) if library != "unknown": raise RuntimeError( f"Tried to load `{args.model_name}` with `{library}` but a possible model to load was not found inside the repo." ) raise e total_size, largest_layer = calculate_maximum_sizes(model) data = [] for dtype in args.dtypes: dtype_total_size = total_size dtype_largest_layer = largest_layer[0] dtype_training_size = estimate_training_usage(dtype_total_size, dtype) if dtype == "float16": dtype_total_size /= 2 dtype_largest_layer /= 2 elif dtype == "int8": dtype_total_size /= 4 dtype_largest_layer /= 4 elif dtype == "int4": dtype_total_size /= 8 dtype_largest_layer /= 8 data.append([dtype, dtype_largest_layer, dtype_total_size, dtype_training_size]) return data def estimate_command(args): data = gather_data(args) for row in data: for i, item in enumerate(row): if isinstance(item, (int, float)): row[i] = convert_bytes(item) elif isinstance(item, dict): training_usage = max(item.values()) row[i] = convert_bytes(training_usage) if training_usage != -1 else "N/A" headers = ["dtype", "Largest Layer", "Total Size", "Training using Adam"] title = f"Memory Usage for loading `{args.model_name}`" table = create_ascii_table(headers, data, title) print(table) def main(): parser = estimate_command_parser() args = parser.parse_args() estimate_command(args) if __name__ == "__main__": main()
accelerate/src/accelerate/commands/estimate.py/0
{ "file_path": "accelerate/src/accelerate/commands/estimate.py", "repo_id": "accelerate", "token_count": 5035 }
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from accelerate import Accelerator, DistributedType class LocalSGD: """ A helper class to support local SGD on top of Accelerator. It simply runs a given number of updates independently on each device, and averages model weights every K synchronization step. It should be used only in the multi-GPU (or multi-CPU) setup without extensions such as DeepSpeed. In particular, this is a simple implementation that cannot support scenarios such as model parallelism. Although we are not aware of the true origins of this simple approach, the idea of local SGD is quite old and goes back to at least: Zhang, J., De Sa, C., Mitliagkas, I., & RΓ©, C. (2016). [Parallel SGD: When does averaging help?. arXiv preprint arXiv:1606.07365.](https://arxiv.org/abs/1606.07365) We credit the term Local SGD to the following paper (but there might be earlier references we are not aware of). Stich, Sebastian Urban. ["Local SGD Converges Fast and Communicates Little." ICLR 2019-International Conference on Learning Representations. No. CONF. 2019.](https://arxiv.org/abs/1805.09767) """ def __enter__(self): if self.enabled: self.model_sync_obj = self.model.no_sync() self.model_sync_obj.__enter__() return self def __exit__(self, type, value, tb): if self.enabled: # Average all models on exit self._sync_and_avg_model_params() self.model_sync_obj.__exit__(type, value, tb) def __init__(self, accelerator: Accelerator, model: torch.nn.Module, local_sgd_steps: int, enabled: bool = True): """ Constructor. Args: model (`torch.nn.Module): The model whose parameters we need to average. accelerator (`Accelerator`): Accelerator object. local_sgd_steps (`int`): A number of local SGD steps (before model parameters are synchronized). enabled (`bool): Local SGD is disabled if this parameter set to `False`. """ if accelerator.distributed_type not in [ DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU, DistributedType.MULTI_XPU, DistributedType.MULTI_MLU, DistributedType.MULTI_MUSA, DistributedType.MULTI_NPU, ]: raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)") self.enabled = enabled and accelerator.distributed_type != DistributedType.NO self.num_steps = 0 if self.enabled: self.accelerator = accelerator self.model = model self.local_sgd_steps = local_sgd_steps def step(self): """ This function makes a "step" and synchronizes model parameters if necessary. """ self.num_steps += 1 if not self.enabled: return if self.num_steps % self.local_sgd_steps == 0: self._sync_and_avg_model_params() def _sync_and_avg_model_params(self): """ Synchronize + Average model parameters across all GPUs """ self.accelerator.wait_for_everyone() with self.accelerator.autocast(): for param in self.model.parameters(): param.data = self.accelerator.reduce(param.data, reduction="mean")
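# Usage sketch (illustrative only; it relies solely on the LocalSGD API defined above,
# and the variable names / hyper-parameters are made up):
#
#   accelerator = Accelerator()
#   model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
#   with LocalSGD(accelerator=accelerator, model=model, local_sgd_steps=8, enabled=True) as local_sgd:
#       for batch in dataloader:
#           outputs = model(batch["inputs"])
#           loss = loss_fn(outputs, batch["labels"])
#           accelerator.backward(loss)
#           optimizer.step()
#           optimizer.zero_grad()
#           local_sgd.step()  # averages parameters across devices every `local_sgd_steps` calls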
accelerate/src/accelerate/local_sgd.py/0
{ "file_path": "accelerate/src/accelerate/local_sgd.py", "repo_id": "accelerate", "token_count": 1577 }
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch.distributed from accelerate.test_utils import require_huggingface_suite, torch_device from accelerate.utils import is_transformers_available if is_transformers_available(): from transformers import AutoModel, TrainingArguments GPT2_TINY = "sshleifer/tiny-gpt2" @require_huggingface_suite def init_torch_dist_then_launch_deepspeed(): backend = "ccl" if torch_device == "xpu" else "nccl" torch.distributed.init_process_group(backend=backend) deepspeed_config = { "zero_optimization": { "stage": 3, }, "train_batch_size": "auto", "train_micro_batch_size_per_gpu": "auto", } train_args = TrainingArguments( output_dir="./", deepspeed=deepspeed_config, ) model = AutoModel.from_pretrained(GPT2_TINY) assert train_args is not None assert model is not None def main(): init_torch_dist_then_launch_deepspeed() if __name__ == "__main__": main()
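# Note (assumption, not part of the original test file): because this script calls
# `torch.distributed.init_process_group` directly, it expects the usual distributed
# environment variables (RANK, WORLD_SIZE, MASTER_ADDR, ...) to be set by a launcher,
# e.g. something along the lines of `torchrun --nproc_per_node 2 test_zero3_integration.py`.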
accelerate/src/accelerate/test_utils/scripts/external_deps/test_zero3_integration.py/0
{ "file_path": "accelerate/src/accelerate/test_utils/scripts/external_deps/test_zero3_integration.py", "repo_id": "accelerate", "token_count": 545 }
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import base64 import json import os from copy import deepcopy from torch import optim from ..optimizer import AcceleratedOptimizer from ..scheduler import AcceleratedScheduler from .dataclasses import DistributedType from .imports import is_bnb_available from .versions import compare_versions def map_pytorch_optim_to_deepspeed(optimizer): """ Args: optimizer: torch.optim.Optimizer Returns the DeepSeedCPUOptimizer (deepspeed.ops) version of the optimizer. """ defaults = {k: v for k, v in optimizer.defaults.items() if k in ["lr", "weight_decay"]} # Select the DeepSpeedCPUOptimizer based on the original optimizer class. # DeepSpeedCPUAdam is the default from deepspeed.ops.adam import DeepSpeedCPUAdam optimizer_class = DeepSpeedCPUAdam # For DeepSpeedCPUAdam (adamw_mode) if compare_versions("deepspeed", ">=", "0.3.1"): defaults["adamw_mode"] = False is_adaw = isinstance(optimizer, optim.AdamW) if is_bnb_available() and not is_adaw: import bitsandbytes.optim as bnb_opt if isinstance(optimizer, (bnb_opt.AdamW, bnb_opt.AdamW32bit)): try: is_adaw = optimizer.optim_bits == 32 except AttributeError: is_adaw = optimizer.args.optim_bits == 32 else: is_adaw = False if is_adaw: defaults["adamw_mode"] = True # For DeepSpeedCPUAdagrad if compare_versions("deepspeed", ">=", "0.5.5"): # Check if the optimizer is PyTorch's Adagrad. is_ada = isinstance(optimizer, optim.Adagrad) # If not, and bitsandbytes is available, # # check if the optimizer is the 32-bit bitsandbytes Adagrad. if is_bnb_available() and not is_ada: import bitsandbytes.optim as bnb_opt if isinstance(optimizer, (bnb_opt.Adagrad, bnb_opt.Adagrad32bit)): try: is_ada = optimizer.optim_bits == 32 except AttributeError: is_ada = optimizer.args.optim_bits == 32 if is_ada: from deepspeed.ops.adagrad import DeepSpeedCPUAdagrad optimizer_class = DeepSpeedCPUAdagrad # For DeepSpeedCPULion if is_bnb_available(min_version="0.38.0") and compare_versions("deepspeed", ">=", "0.11.0"): from bitsandbytes.optim import Lion, Lion32bit if isinstance(optimizer, (Lion, Lion32bit)): try: is_bnb_32bits = optimizer.optim_bits == 32 except AttributeError: is_bnb_32bits = optimizer.args.optim_bits == 32 if is_bnb_32bits: from deepspeed.ops.lion import DeepSpeedCPULion optimizer_class = DeepSpeedCPULion return optimizer_class(optimizer.param_groups, **defaults) def get_active_deepspeed_plugin(state): """ Returns the currently active DeepSpeedPlugin. Raises: ValueError: If DeepSpeed was not enabled and this function is called. """ if state.distributed_type != DistributedType.DEEPSPEED: raise ValueError( "Couldn't retrieve the active `DeepSpeedPlugin` as none were enabled. " "Please make sure that either `Accelerator` is configured for `deepspeed` " "or make sure that the desired `DeepSpeedPlugin` has been enabled (`AcceleratorState().select_deepspeed_plugin(name)`) " "before calling this function." 
) if not isinstance(state.deepspeed_plugins, dict): return state.deepspeed_plugins return next(plugin for plugin in state.deepspeed_plugins.values() if plugin.selected) class HfDeepSpeedConfig: """ This object contains a DeepSpeed configuration dictionary and can be quickly queried for things like zero stage. A `weakref` of this object is stored in the module's globals to be able to access the config from areas where things like the Trainer object is not available (e.g. `from_pretrained` and `_get_resized_embeddings`). Therefore it's important that this object remains alive while the program is still running. [`Trainer`] uses the `HfTrainerDeepSpeedConfig` subclass instead. That subclass has logic to sync the configuration with values of [`TrainingArguments`] by replacing special placeholder values: `"auto"`. Without this special logic the DeepSpeed configuration is not modified in any way. Args: config_file_or_dict (`Union[str, Dict]`): path to DeepSpeed config file or dict. """ def __init__(self, config_file_or_dict): if isinstance(config_file_or_dict, dict): # Don't modify user's data should they want to reuse it (e.g. in tests), because once we # modified it, it will not be accepted here again, since `auto` values would have been overridden config = deepcopy(config_file_or_dict) elif os.path.exists(config_file_or_dict): with open(config_file_or_dict, encoding="utf-8") as f: config = json.load(f) else: try: config_decoded = base64.urlsafe_b64decode(config_file_or_dict).decode("utf-8") config = json.loads(config_decoded) except (UnicodeDecodeError, AttributeError, ValueError): raise ValueError( f"Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}" ) self.config = config self.set_stage_and_offload() def set_stage_and_offload(self): # zero stage - this is done as early as possible, before model is created, to allow # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object # during ``zero.Init()`` which needs to know the dtype, and some other hparams. self._stage = self.get_value("zero_optimization.stage", -1) # offload self._offload = False if self.is_zero2() or self.is_zero3(): offload_devices_valid = set(["cpu", "nvme"]) offload_devices = set( [ self.get_value("zero_optimization.offload_optimizer.device"), self.get_value("zero_optimization.offload_param.device"), ] ) if len(offload_devices & offload_devices_valid) > 0: self._offload = True def find_config_node(self, ds_key_long): config = self.config # find the config node of interest if it exists nodes = ds_key_long.split(".") ds_key = nodes.pop() for node in nodes: config = config.get(node) if config is None: return None, ds_key return config, ds_key def get_value(self, ds_key_long, default=None): """ Returns the set value or `default` if no value is set """ config, ds_key = self.find_config_node(ds_key_long) if config is None: return default return config.get(ds_key, default) def del_config_sub_tree(self, ds_key_long, must_exist=False): """ Deletes a sub-section of the config file if it's found. Unless `must_exist` is `True` the section doesn't have to exist. 
""" config = self.config # find the config node of interest if it exists nodes = ds_key_long.split(".") for node in nodes: parent_config = config config = config.get(node) if config is None: if must_exist: raise ValueError(f"Can't find {ds_key_long} entry in the config: {self.config}") else: return # if found remove it if parent_config is not None: parent_config.pop(node) def is_true(self, ds_key_long): """ Returns `True`/``False` only if the value is set, always `False` otherwise. So use this method to ask the very specific question of whether the value is set to `True` (and it's not set to `False`` or isn't set). """ value = self.get_value(ds_key_long) return False if value is None else bool(value) def is_false(self, ds_key_long): """ Returns `True`/``False` only if the value is set, always `False` otherwise. So use this method to ask the very specific question of whether the value is set to `False` (and it's not set to `True`` or isn't set). """ value = self.get_value(ds_key_long) return False if value is None else not bool(value) def is_zero2(self): return self._stage == 2 def is_zero3(self): return self._stage == 3 def is_offload(self): return self._offload class DeepSpeedEngineWrapper: """ Internal wrapper for deepspeed.runtime.engine.DeepSpeedEngine. This is used to follow conventional training loop. Args: engine (deepspeed.runtime.engine.DeepSpeedEngine): deepspeed engine to wrap """ def __init__(self, engine): self.engine = engine def backward(self, loss, **kwargs): # runs backpropagation and handles mixed precision self.engine.backward(loss, **kwargs) # Deepspeed's `engine.step` performs the following operations: # - gradient accumulation check # - gradient clipping # - optimizer step # - zero grad # - checking overflow # - lr_scheduler step (only if engine.lr_scheduler is not None) self.engine.step() # and this plugin overrides the above calls with no-ops when Accelerate runs under # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple # training loop that works transparently under many training regimes. class DeepSpeedOptimizerWrapper(AcceleratedOptimizer): """ Internal wrapper around a deepspeed optimizer. Args: optimizer (`torch.optim.optimizer.Optimizer`): The optimizer to wrap. """ def __init__(self, optimizer): super().__init__(optimizer, device_placement=False, scaler=None) self.__has_overflow__ = hasattr(self.optimizer, "overflow") def zero_grad(self, set_to_none=None): pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed def step(self): pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed @property def step_was_skipped(self): """Whether or not the optimizer step was done, or skipped because of gradient overflow.""" if self.__has_overflow__: return self.optimizer.overflow return False class DeepSpeedSchedulerWrapper(AcceleratedScheduler): """ Internal wrapper around a deepspeed scheduler. Args: scheduler (`torch.optim.lr_scheduler.LambdaLR`): The scheduler to wrap. optimizers (one or a list of `torch.optim.Optimizer`): """ def __init__(self, scheduler, optimizers): super().__init__(scheduler, optimizers) def step(self): pass # `accelerator.backward(loss)` is doing that automatically. 
Therefore, its implementation is not needed class DummyOptim: """ Dummy optimizer presents model parameters or param groups, this is primarily used to follow conventional training loop when optimizer config is specified in the deepspeed config file. Args: lr (float): Learning rate. params (iterable): iterable of parameters to optimize or dicts defining parameter groups weight_decay (float): Weight decay. **kwargs (additional keyword arguments, *optional*): Other arguments. """ def __init__(self, params, lr=0.001, weight_decay=0, **kwargs): self.params = params self.lr = lr self.weight_decay = weight_decay self.kwargs = kwargs class DummyScheduler: """ Dummy scheduler presents model parameters or param groups, this is primarily used to follow conventional training loop when scheduler config is specified in the deepspeed config file. Args: optimizer (`torch.optim.optimizer.Optimizer`): The optimizer to wrap. total_num_steps (int, *optional*): Total number of steps. warmup_num_steps (int, *optional*): Number of steps for warmup. lr_scheduler_callable (callable, *optional*): A callable function that creates an LR Scheduler. It accepts only one argument `optimizer`. **kwargs (additional keyword arguments, *optional*): Other arguments. """ def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, lr_scheduler_callable=None, **kwargs): self.optimizer = optimizer self.total_num_steps = total_num_steps self.warmup_num_steps = warmup_num_steps self.lr_scheduler_callable = lr_scheduler_callable self.kwargs = kwargs
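# Illustrative sketch of how `HfDeepSpeedConfig` answers queries about a config
# (the config values below are made up; only methods defined above are used):
#
#   ds_config = HfDeepSpeedConfig(
#       {"zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}}}
#   )
#   ds_config.is_zero3()                             # True
#   ds_config.is_offload()                           # True (offload_param.device is "cpu")
#   ds_config.get_value("zero_optimization.stage")   # 3
#   ds_config.get_value("train_batch_size", "auto")  # "auto" (key missing, default returned)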
accelerate/src/accelerate/utils/deepspeed.py/0
{ "file_path": "accelerate/src/accelerate/utils/deepspeed.py", "repo_id": "accelerate", "token_count": 5530 }
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib.metadata from typing import Union from packaging.version import Version, parse from .constants import STR_OPERATION_TO_FUNC torch_version = parse(importlib.metadata.version("torch")) def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str): """ Compares a library version to some requirement using a given operation. Args: library_or_version (`str` or `packaging.version.Version`): A library name or a version to check. operation (`str`): A string representation of an operator, such as `">"` or `"<="`. requirement_version (`str`): The version to compare the library version against """ if operation not in STR_OPERATION_TO_FUNC.keys(): raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}") operation = STR_OPERATION_TO_FUNC[operation] if isinstance(library_or_version, str): library_or_version = parse(importlib.metadata.version(library_or_version)) return operation(library_or_version, parse(requirement_version)) def is_torch_version(operation: str, version: str): """ Compares the current PyTorch version to a given reference with an operation. Args: operation (`str`): A string representation of an operator, such as `">"` or `"<="` version (`str`): A string version of PyTorch """ return compare_versions(torch_version, operation, version)
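# Usage sketch (illustrative; the version strings are arbitrary examples):
#
#   is_torch_version(">=", "2.1.0")           # True if the installed torch is at least 2.1.0
#   compare_versions("numpy", "<", "3.0.0")   # compares the installed numpy against 3.0.0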
accelerate/src/accelerate/utils/versions.py/0
{ "file_path": "accelerate/src/accelerate/utils/versions.py", "repo_id": "accelerate", "token_count": 701 }
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pickle import unittest import torch from accelerate import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils import require_cpu, require_non_cpu @require_cpu class CPUOptimizerTester(unittest.TestCase): def test_accelerated_optimizer_pickling(self): model = torch.nn.Linear(10, 10) optimizer = torch.optim.SGD(model.parameters(), 0.1) accelerator = Accelerator() optimizer = accelerator.prepare(optimizer) try: pickle.loads(pickle.dumps(optimizer)) except Exception as e: self.fail(f"Accelerated optimizer pickling failed with {e}") AcceleratorState._reset_state() @require_non_cpu class OptimizerTester(unittest.TestCase): def test_accelerated_optimizer_step_was_skipped(self): model = torch.nn.Linear(5, 5) optimizer = torch.optim.SGD(model.parameters(), 0.1) accelerator = Accelerator(mixed_precision="fp16") model, optimizer = accelerator.prepare(model, optimizer) loss = model(torch.randn(2, 5, device=accelerator.device)).sum() accelerator.backward(loss) for p in model.parameters(): # Fake the gradients, as if there's no overflow p.grad.fill_(0.01) optimizer.step() assert optimizer.step_was_skipped is False loss = model(torch.randn(2, 5, device=accelerator.device)).sum() accelerator.backward(loss) for p in model.parameters(): p.grad.fill_(0.01) # Manually set the gradients to be NaN, as if there's an overflow p.grad[0] = torch.tensor(float("nan")) optimizer.step() assert optimizer.step_was_skipped is True loss = model(torch.randn(2, 5, device=accelerator.device)).sum() accelerator.backward(loss) for p in model.parameters(): p.grad.fill_(0.01) # Manually set the gradients to be NaN, as if there's an overflow p.grad[0] = torch.tensor(float("nan")) optimizer.step() assert optimizer.step_was_skipped is True loss = model(torch.randn(2, 5, device=accelerator.device)).sum() accelerator.backward(loss) for p in model.parameters(): # Fake the gradients, as if there's no overflow p.grad.fill_(0.01) optimizer.step() assert optimizer.step_was_skipped is False AcceleratorState._reset_state()
accelerate/tests/test_optimizer.py/0
{ "file_path": "accelerate/tests/test_optimizer.py", "repo_id": "accelerate", "token_count": 1194 }
use crate::benchmarks::{BenchDevice, BenchDeviceHandler}; use candle_core::{DType, Device, Tensor}; use criterion::{black_box, criterion_group, Criterion, Throughput}; use std::time::Instant; fn run( x: &Tensor, k: &Tensor, padding: usize, output_padding: usize, stride: usize, dilation: usize, ) { x.conv_transpose2d(k, padding, output_padding, stride, dilation) .unwrap(); } fn run_benchmark(c: &mut Criterion, device: &Device, dtype: DType, name: &str) { let t = Tensor::arange(0.0f32, 10000.0, device) .unwrap() .reshape((1, 4, 50, 50)) .unwrap() .to_dtype(dtype) .unwrap(); let kernel = Tensor::arange(0.0f32, 100.0, device) .unwrap() .reshape((4, 1, 5, 5)) .unwrap() .to_dtype(dtype) .unwrap(); let flops = t.dims().iter().product::<usize>() * dtype.size_in_bytes(); let mut group = c.benchmark_group(device.bench_name(name)); group.throughput(Throughput::Bytes(flops as u64)); group.bench_function("iter", move |b| { b.iter_custom(|iters| { let start = Instant::now(); for _i in 0..iters { run(black_box(&t), black_box(&kernel), 1, 0, 1, 2); } device.sync().unwrap(); start.elapsed() }) }); group.finish(); } fn criterion_benchmark(c: &mut Criterion) { let handler = BenchDeviceHandler::new().unwrap(); for device in handler.devices { run_benchmark(c, &device, DType::F32, "conv_transpose2d_f32"); run_benchmark(c, &device, DType::F16, "conv_transpose2d_f16"); run_benchmark(c, &device, DType::BF16, "conv_transpose2d_bf16"); } } criterion_group!(benches, criterion_benchmark);
candle/candle-core/benches/benchmarks/conv_transpose2d.rs/0
{ "file_path": "candle/candle-core/benches/benchmarks/conv_transpose2d.rs", "repo_id": "candle", "token_count": 826 }
//! Implement conversion traits for tensors use crate::{DType, Device, Error, Tensor, WithDType}; use half::{bf16, f16, slice::HalfFloatSliceExt}; use std::convert::TryFrom; impl<T: WithDType> TryFrom<&Tensor> for Vec<T> { type Error = Error; fn try_from(tensor: &Tensor) -> Result<Self, Self::Error> { tensor.to_vec1::<T>() } } impl<T: WithDType> TryFrom<&Tensor> for Vec<Vec<T>> { type Error = Error; fn try_from(tensor: &Tensor) -> Result<Self, Self::Error> { tensor.to_vec2::<T>() } } impl<T: WithDType> TryFrom<&Tensor> for Vec<Vec<Vec<T>>> { type Error = Error; fn try_from(tensor: &Tensor) -> Result<Self, Self::Error> { tensor.to_vec3::<T>() } } impl<T: WithDType> TryFrom<Tensor> for Vec<T> { type Error = Error; fn try_from(tensor: Tensor) -> Result<Self, Self::Error> { Vec::<T>::try_from(&tensor) } } impl<T: WithDType> TryFrom<Tensor> for Vec<Vec<T>> { type Error = Error; fn try_from(tensor: Tensor) -> Result<Self, Self::Error> { Vec::<Vec<T>>::try_from(&tensor) } } impl<T: WithDType> TryFrom<Tensor> for Vec<Vec<Vec<T>>> { type Error = Error; fn try_from(tensor: Tensor) -> Result<Self, Self::Error> { Vec::<Vec<Vec<T>>>::try_from(&tensor) } } impl<T: WithDType> TryFrom<&[T]> for Tensor { type Error = Error; fn try_from(v: &[T]) -> Result<Self, Self::Error> { Tensor::from_slice(v, v.len(), &Device::Cpu) } } impl<T: WithDType> TryFrom<Vec<T>> for Tensor { type Error = Error; fn try_from(v: Vec<T>) -> Result<Self, Self::Error> { let len = v.len(); Tensor::from_vec(v, len, &Device::Cpu) } } macro_rules! from_tensor { ($typ:ident) => { impl TryFrom<&Tensor> for $typ { type Error = Error; fn try_from(tensor: &Tensor) -> Result<Self, Self::Error> { tensor.to_scalar::<$typ>() } } impl TryFrom<Tensor> for $typ { type Error = Error; fn try_from(tensor: Tensor) -> Result<Self, Self::Error> { $typ::try_from(&tensor) } } impl TryFrom<$typ> for Tensor { type Error = Error; fn try_from(v: $typ) -> Result<Self, Self::Error> { Tensor::new(v, &Device::Cpu) } } }; } from_tensor!(f64); from_tensor!(f32); from_tensor!(f16); from_tensor!(bf16); from_tensor!(i64); from_tensor!(u32); from_tensor!(u8); impl Tensor { pub fn write_bytes<W: std::io::Write>(&self, f: &mut W) -> crate::Result<()> { use byteorder::{LittleEndian, WriteBytesExt}; let vs = self.flatten_all()?; match self.dtype() { DType::BF16 => { let vs = vs.to_vec1::<bf16>()?; for &v in vs.reinterpret_cast() { f.write_u16::<LittleEndian>(v)? } } DType::F16 => { let vs = vs.to_vec1::<f16>()?; for &v in vs.reinterpret_cast() { f.write_u16::<LittleEndian>(v)? } } DType::F32 => { // TODO: Avoid using a buffer when data is already on the CPU. for v in vs.to_vec1::<f32>()? { f.write_f32::<LittleEndian>(v)? } } DType::F64 => { for v in vs.to_vec1::<f64>()? { f.write_f64::<LittleEndian>(v)? } } DType::U32 => { for v in vs.to_vec1::<u32>()? { f.write_u32::<LittleEndian>(v)? } } DType::I64 => { for v in vs.to_vec1::<i64>()? { f.write_i64::<LittleEndian>(v)? } } DType::U8 => { let vs = vs.to_vec1::<u8>()?; f.write_all(&vs)?; } } Ok(()) } }
candle/candle-core/src/convert.rs/0
{ "file_path": "candle/candle-core/src/convert.rs", "repo_id": "candle", "token_count": 2242 }
//! Pretty printing of tensors //! //! This implementation should be in line with the [PyTorch version](https://github.com/pytorch/pytorch/blob/7b419e8513a024e172eae767e24ec1b849976b13/torch/_tensor_str.py). //! use crate::{DType, Result, Tensor, WithDType}; use half::{bf16, f16}; impl Tensor { fn fmt_dt<T: WithDType + std::fmt::Display>( &self, f: &mut std::fmt::Formatter, ) -> std::fmt::Result { let device_str = match self.device().location() { crate::DeviceLocation::Cpu => "".to_owned(), crate::DeviceLocation::Cuda { gpu_id } => { format!(", cuda:{}", gpu_id) } crate::DeviceLocation::Metal { gpu_id } => { format!(", metal:{}", gpu_id) } }; write!(f, "Tensor[")?; match self.dims() { [] => { if let Ok(v) = self.to_scalar::<T>() { write!(f, "{v}")? } } [s] if *s < 10 => { if let Ok(vs) = self.to_vec1::<T>() { for (i, v) in vs.iter().enumerate() { if i > 0 { write!(f, ", ")?; } write!(f, "{v}")?; } } } dims => { write!(f, "dims ")?; for (i, d) in dims.iter().enumerate() { if i > 0 { write!(f, ", ")?; } write!(f, "{d}")?; } } } write!(f, "; {}{}]", self.dtype().as_str(), device_str) } } impl std::fmt::Debug for Tensor { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { match self.dtype() { DType::U8 => self.fmt_dt::<u8>(f), DType::U32 => self.fmt_dt::<u32>(f), DType::I64 => self.fmt_dt::<i64>(f), DType::BF16 => self.fmt_dt::<bf16>(f), DType::F16 => self.fmt_dt::<f16>(f), DType::F32 => self.fmt_dt::<f32>(f), DType::F64 => self.fmt_dt::<f64>(f), } } } /// Options for Tensor pretty printing #[derive(Debug, Clone)] pub struct PrinterOptions { pub precision: usize, pub threshold: usize, pub edge_items: usize, pub line_width: usize, pub sci_mode: Option<bool>, } static PRINT_OPTS: std::sync::Mutex<PrinterOptions> = std::sync::Mutex::new(PrinterOptions::const_default()); impl PrinterOptions { // We cannot use the default trait as it's not const. 
const fn const_default() -> Self { Self { precision: 4, threshold: 1000, edge_items: 3, line_width: 80, sci_mode: None, } } } pub fn print_options() -> &'static std::sync::Mutex<PrinterOptions> { &PRINT_OPTS } pub fn set_print_options(options: PrinterOptions) { *PRINT_OPTS.lock().unwrap() = options } pub fn set_print_options_default() { *PRINT_OPTS.lock().unwrap() = PrinterOptions::const_default() } pub fn set_print_options_short() { *PRINT_OPTS.lock().unwrap() = PrinterOptions { precision: 2, threshold: 1000, edge_items: 2, line_width: 80, sci_mode: None, } } pub fn set_print_options_full() { *PRINT_OPTS.lock().unwrap() = PrinterOptions { precision: 4, threshold: usize::MAX, edge_items: 3, line_width: 80, sci_mode: None, } } pub fn set_line_width(line_width: usize) { PRINT_OPTS.lock().unwrap().line_width = line_width } pub fn set_precision(precision: usize) { PRINT_OPTS.lock().unwrap().precision = precision } pub fn set_edge_items(edge_items: usize) { PRINT_OPTS.lock().unwrap().edge_items = edge_items } pub fn set_threshold(threshold: usize) { PRINT_OPTS.lock().unwrap().threshold = threshold } pub fn set_sci_mode(sci_mode: Option<bool>) { PRINT_OPTS.lock().unwrap().sci_mode = sci_mode } struct FmtSize { current_size: usize, } impl FmtSize { fn new() -> Self { Self { current_size: 0 } } fn final_size(self) -> usize { self.current_size } } impl std::fmt::Write for FmtSize { fn write_str(&mut self, s: &str) -> std::fmt::Result { self.current_size += s.len(); Ok(()) } } trait TensorFormatter { type Elem: WithDType; fn fmt<T: std::fmt::Write>(&self, v: Self::Elem, max_w: usize, f: &mut T) -> std::fmt::Result; fn max_width(&self, to_display: &Tensor) -> usize { let mut max_width = 1; if let Ok(vs) = to_display.flatten_all().and_then(|t| t.to_vec1()) { for &v in vs.iter() { let mut fmt_size = FmtSize::new(); let _res = self.fmt(v, 1, &mut fmt_size); max_width = usize::max(max_width, fmt_size.final_size()) } } max_width } fn write_newline_indent(i: usize, f: &mut std::fmt::Formatter) -> std::fmt::Result { writeln!(f)?; for _ in 0..i { write!(f, " ")? } Ok(()) } fn fmt_tensor( &self, t: &Tensor, indent: usize, max_w: usize, summarize: bool, po: &PrinterOptions, f: &mut std::fmt::Formatter, ) -> std::fmt::Result { let dims = t.dims(); let edge_items = po.edge_items; write!(f, "[")?; match dims { [] => { if let Ok(v) = t.to_scalar::<Self::Elem>() { self.fmt(v, max_w, f)? } } [v] if summarize && *v > 2 * edge_items => { if let Ok(vs) = t .narrow(0, 0, edge_items) .and_then(|t| t.to_vec1::<Self::Elem>()) { for v in vs.into_iter() { self.fmt(v, max_w, f)?; write!(f, ", ")?; } } write!(f, "...")?; if let Ok(vs) = t .narrow(0, v - edge_items, edge_items) .and_then(|t| t.to_vec1::<Self::Elem>()) { for v in vs.into_iter() { write!(f, ", ")?; self.fmt(v, max_w, f)?; } } } [_] => { let elements_per_line = usize::max(1, po.line_width / (max_w + 2)); if let Ok(vs) = t.to_vec1::<Self::Elem>() { for (i, v) in vs.into_iter().enumerate() { if i > 0 { if i % elements_per_line == 0 { write!(f, ",")?; Self::write_newline_indent(indent, f)? } else { write!(f, ", ")?; } } self.fmt(v, max_w, f)? } } } _ => { if summarize && dims[0] > 2 * edge_items { for i in 0..edge_items { match t.get(i) { Ok(t) => self.fmt_tensor(&t, indent + 1, max_w, summarize, po, f)?, Err(e) => write!(f, "{e:?}")?, } write!(f, ",")?; Self::write_newline_indent(indent, f)? 
} write!(f, "...")?; Self::write_newline_indent(indent, f)?; for i in dims[0] - edge_items..dims[0] { match t.get(i) { Ok(t) => self.fmt_tensor(&t, indent + 1, max_w, summarize, po, f)?, Err(e) => write!(f, "{e:?}")?, } if i + 1 != dims[0] { write!(f, ",")?; Self::write_newline_indent(indent, f)? } } } else { for i in 0..dims[0] { match t.get(i) { Ok(t) => self.fmt_tensor(&t, indent + 1, max_w, summarize, po, f)?, Err(e) => write!(f, "{e:?}")?, } if i + 1 != dims[0] { write!(f, ",")?; Self::write_newline_indent(indent, f)? } } } } } write!(f, "]")?; Ok(()) } } struct FloatFormatter<S: WithDType> { int_mode: bool, sci_mode: bool, precision: usize, _phantom: std::marker::PhantomData<S>, } impl<S> FloatFormatter<S> where S: WithDType + num_traits::Float + std::fmt::Display, { fn new(t: &Tensor, po: &PrinterOptions) -> Result<Self> { let mut int_mode = true; let mut sci_mode = false; // Rather than containing all values, this should only include // values that end up being displayed according to [threshold]. let values = t .flatten_all()? .to_vec1()? .into_iter() .filter(|v: &S| v.is_finite() && !v.is_zero()) .collect::<Vec<_>>(); if !values.is_empty() { let mut nonzero_finite_min = S::max_value(); let mut nonzero_finite_max = S::min_value(); for &v in values.iter() { let v = v.abs(); if v < nonzero_finite_min { nonzero_finite_min = v } if v > nonzero_finite_max { nonzero_finite_max = v } } for &value in values.iter() { if value.ceil() != value { int_mode = false; break; } } if let Some(v1) = S::from(1000.) { if let Some(v2) = S::from(1e8) { if let Some(v3) = S::from(1e-4) { sci_mode = nonzero_finite_max / nonzero_finite_min > v1 || nonzero_finite_max > v2 || nonzero_finite_min < v3 } } } } match po.sci_mode { None => {} Some(v) => sci_mode = v, } Ok(Self { int_mode, sci_mode, precision: po.precision, _phantom: std::marker::PhantomData, }) } } impl<S> TensorFormatter for FloatFormatter<S> where S: WithDType + num_traits::Float + std::fmt::Display + std::fmt::LowerExp, { type Elem = S; fn fmt<T: std::fmt::Write>(&self, v: Self::Elem, max_w: usize, f: &mut T) -> std::fmt::Result { if self.sci_mode { write!( f, "{v:width$.prec$e}", v = v, width = max_w, prec = self.precision ) } else if self.int_mode { if v.is_finite() { write!(f, "{v:width$.0}.", v = v, width = max_w - 1) } else { write!(f, "{v:max_w$.0}") } } else { write!( f, "{v:width$.prec$}", v = v, width = max_w, prec = self.precision ) } } } struct IntFormatter<S: WithDType> { _phantom: std::marker::PhantomData<S>, } impl<S: WithDType> IntFormatter<S> { fn new() -> Self { Self { _phantom: std::marker::PhantomData, } } } impl<S> TensorFormatter for IntFormatter<S> where S: WithDType + std::fmt::Display, { type Elem = S; fn fmt<T: std::fmt::Write>(&self, v: Self::Elem, max_w: usize, f: &mut T) -> std::fmt::Result { write!(f, "{v:max_w$}") } } fn get_summarized_data(t: &Tensor, edge_items: usize) -> Result<Tensor> { let dims = t.dims(); if dims.is_empty() { Ok(t.clone()) } else if dims.len() == 1 { if dims[0] > 2 * edge_items { Tensor::cat( &[ t.narrow(0, 0, edge_items)?, t.narrow(0, dims[0] - edge_items, edge_items)?, ], 0, ) } else { Ok(t.clone()) } } else if dims[0] > 2 * edge_items { let mut vs: Vec<_> = (0..edge_items) .map(|i| get_summarized_data(&t.get(i)?, edge_items)) .collect::<Result<Vec<_>>>()?; for i in (dims[0] - edge_items)..dims[0] { vs.push(get_summarized_data(&t.get(i)?, edge_items)?) 
} Tensor::cat(&vs, 0) } else { let vs: Vec<_> = (0..dims[0]) .map(|i| get_summarized_data(&t.get(i)?, edge_items)) .collect::<Result<Vec<_>>>()?; Tensor::cat(&vs, 0) } } impl std::fmt::Display for Tensor { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { let po = PRINT_OPTS.lock().unwrap(); let summarize = self.elem_count() > po.threshold; let to_display = if summarize { match get_summarized_data(self, po.edge_items) { Ok(v) => v, Err(err) => return write!(f, "{err:?}"), } } else { self.clone() }; match self.dtype() { DType::U8 => { let tf: IntFormatter<u8> = IntFormatter::new(); let max_w = tf.max_width(&to_display); tf.fmt_tensor(self, 1, max_w, summarize, &po, f)?; writeln!(f)?; } DType::U32 => { let tf: IntFormatter<u32> = IntFormatter::new(); let max_w = tf.max_width(&to_display); tf.fmt_tensor(self, 1, max_w, summarize, &po, f)?; writeln!(f)?; } DType::I64 => { let tf: IntFormatter<i64> = IntFormatter::new(); let max_w = tf.max_width(&to_display); tf.fmt_tensor(self, 1, max_w, summarize, &po, f)?; writeln!(f)?; } DType::BF16 => { if let Ok(tf) = FloatFormatter::<bf16>::new(&to_display, &po) { let max_w = tf.max_width(&to_display); tf.fmt_tensor(self, 1, max_w, summarize, &po, f)?; writeln!(f)?; } } DType::F16 => { if let Ok(tf) = FloatFormatter::<f16>::new(&to_display, &po) { let max_w = tf.max_width(&to_display); tf.fmt_tensor(self, 1, max_w, summarize, &po, f)?; writeln!(f)?; } } DType::F64 => { if let Ok(tf) = FloatFormatter::<f64>::new(&to_display, &po) { let max_w = tf.max_width(&to_display); tf.fmt_tensor(self, 1, max_w, summarize, &po, f)?; writeln!(f)?; } } DType::F32 => { if let Ok(tf) = FloatFormatter::<f32>::new(&to_display, &po) { let max_w = tf.max_width(&to_display); tf.fmt_tensor(self, 1, max_w, summarize, &po, f)?; writeln!(f)?; } } }; let device_str = match self.device().location() { crate::DeviceLocation::Cpu => "".to_owned(), crate::DeviceLocation::Cuda { gpu_id } => { format!(", cuda:{}", gpu_id) } crate::DeviceLocation::Metal { gpu_id } => { format!(", metal:{}", gpu_id) } }; write!( f, "Tensor[{:?}, {}{}]", self.dims(), self.dtype().as_str(), device_str ) } }
candle/candle-core/src/display.rs/0
{ "file_path": "candle/candle-core/src/display.rs", "repo_id": "candle", "token_count": 9761 }
#![allow(unused)] use super::GgmlDType; use crate::{CudaDevice, CudaStorage, Error, Result}; pub struct QCudaStorage { dtype: GgmlDType, device: CudaDevice, } impl QCudaStorage { pub fn zeros(_: &CudaDevice, _: usize, _: GgmlDType) -> Result<Self> { Err(Error::NotCompiledWithCudaSupport) } pub fn dtype(&self) -> GgmlDType { self.dtype } pub fn device(&self) -> &CudaDevice { &self.device } pub fn dequantize(&self, _elem_count: usize) -> Result<CudaStorage> { Err(Error::NotCompiledWithCudaSupport) } pub fn dequantize_f16(&self, _elem_count: usize) -> Result<CudaStorage> { Err(Error::NotCompiledWithCudaSupport) } pub fn quantize(&mut self, _src: &CudaStorage) -> Result<()> { Err(Error::NotCompiledWithCudaSupport) } pub fn storage_size_in_bytes(&self) -> usize { 0 } pub fn fwd( &self, _self_shape: &crate::Shape, _storage: &CudaStorage, _layout: &crate::Layout, ) -> Result<(CudaStorage, crate::Shape)> { Err(Error::NotCompiledWithCudaSupport) } } pub fn load_quantized<T: super::GgmlType + Send + Sync + 'static>( _device: &CudaDevice, _data: &[T], ) -> Result<super::QStorage> { Err(Error::NotCompiledWithCudaSupport) }
candle/candle-core/src/quantized/dummy_cuda.rs/0
{ "file_path": "candle/candle-core/src/quantized/dummy_cuda.rs", "repo_id": "candle", "token_count": 594 }
use crate::Layout; /// An iterator over offset position for items of an N-dimensional arrays stored in a /// flat buffer using some potential strides. #[derive(Debug)] pub struct StridedIndex<'a> { next_storage_index: Option<usize>, multi_index: Vec<usize>, dims: &'a [usize], stride: &'a [usize], } impl<'a> StridedIndex<'a> { pub(crate) fn new(dims: &'a [usize], stride: &'a [usize], start_offset: usize) -> Self { let elem_count: usize = dims.iter().product(); let next_storage_index = if elem_count == 0 { None } else { // This applies to the scalar case. Some(start_offset) }; StridedIndex { next_storage_index, multi_index: vec![0; dims.len()], dims, stride, } } pub(crate) fn from_layout(l: &'a Layout) -> Self { Self::new(l.dims(), l.stride(), l.start_offset()) } } impl Iterator for StridedIndex<'_> { type Item = usize; fn next(&mut self) -> Option<Self::Item> { let storage_index = self.next_storage_index?; let mut updated = false; let mut next_storage_index = storage_index; for ((multi_i, max_i), stride_i) in self .multi_index .iter_mut() .zip(self.dims.iter()) .zip(self.stride.iter()) .rev() { let next_i = *multi_i + 1; if next_i < *max_i { *multi_i = next_i; updated = true; next_storage_index += stride_i; break; } else { next_storage_index -= *multi_i * stride_i; *multi_i = 0 } } self.next_storage_index = if updated { Some(next_storage_index) } else { None }; Some(storage_index) } } #[derive(Debug)] pub enum StridedBlocks<'a> { SingleBlock { start_offset: usize, len: usize, }, MultipleBlocks { block_start_index: StridedIndex<'a>, block_len: usize, }, }
candle/candle-core/src/strided_index.rs/0
{ "file_path": "candle/candle-core/src/strided_index.rs", "repo_id": "candle", "token_count": 1094 }
import torch from collections import OrderedDict # Write a trivial tensor to a pt file a= torch.tensor([[1,2,3,4], [5,6,7,8]]) o = OrderedDict() o["test"] = a # Write a trivial tensor to a pt file torch.save(o, "test.pt") ############################################################################################################ # Write a trivial tensor to a pt file with a key torch.save({"model_state_dict": o}, "test_with_key.pt") ############################################################################################################ # Create a tensor with fortran contiguous memory layout import numpy as np # Step 1: Create a 3D NumPy array with Fortran order using a range of numbers # For example, creating a 2x3x4 array array_fortran = np.asfortranarray(np.arange(1, 2*3*4 + 1).reshape(2, 3, 4)) # Verify the memory order print("Is Fortran contiguous (F order):", array_fortran.flags['F_CONTIGUOUS']) # Should be True print("Is C contiguous (C order):", array_fortran.flags['C_CONTIGUOUS']) # Should be False # Step 2: Convert the NumPy array to a PyTorch tensor tensor_fortran = torch.from_numpy(array_fortran) # Verify the tensor layout print("Tensor stride:", tensor_fortran.stride()) # Stride will reflect the Fortran memory layout # Step 3: Save the PyTorch tensor to a .pth file torch.save({"tensor_fortran": tensor_fortran}, 'fortran_tensor_3d.pth') print("3D Tensor saved with Fortran layout.")
candle/candle-core/tests/pth.py/0
{ "file_path": "candle/candle-core/tests/pth.py", "repo_id": "candle", "token_count": 441 }
//! The CIFAR-10 dataset. //! //! The files can be downloaded from the following page: //! <https://www.cs.toronto.edu/~kriz/cifar.html> //! The binary version of the dataset is used. use crate::vision::Dataset; use candle::{DType, Device, Error, Result, Tensor}; use hf_hub::{api::sync::Api, Repo, RepoType}; use parquet::file::reader::{FileReader, SerializedFileReader}; use std::fs::File; use std::io::{BufReader, Read}; const W: usize = 32; const H: usize = 32; const C: usize = 3; const BYTES_PER_IMAGE: usize = W * H * C + 1; const SAMPLES_PER_FILE: usize = 10000; fn read_file(filename: &std::path::Path) -> Result<(Tensor, Tensor)> { let mut buf_reader = BufReader::new(File::open(filename)?); let mut data = vec![0u8; SAMPLES_PER_FILE * BYTES_PER_IMAGE]; buf_reader.read_exact(&mut data)?; let mut images = vec![]; let mut labels = vec![]; for index in 0..SAMPLES_PER_FILE { let content_offset = BYTES_PER_IMAGE * index; labels.push(data[content_offset]); images.push(&data[1 + content_offset..content_offset + BYTES_PER_IMAGE]); } let images: Vec<u8> = images .iter() .copied() .flatten() .copied() .collect::<Vec<_>>(); let labels = Tensor::from_vec(labels, SAMPLES_PER_FILE, &Device::Cpu)?; let images = Tensor::from_vec(images, (SAMPLES_PER_FILE, C, H, W), &Device::Cpu)?; let images = (images.to_dtype(DType::F32)? / 255.)?; Ok((images, labels)) } pub fn load_dir<T: AsRef<std::path::Path>>(dir: T) -> Result<Dataset> { let dir = dir.as_ref(); let (test_images, test_labels) = read_file(&dir.join("test_batch.bin"))?; let train_images_and_labels = [ "data_batch_1.bin", "data_batch_2.bin", "data_batch_3.bin", "data_batch_4.bin", "data_batch_5.bin", ] .iter() .map(|x| read_file(&dir.join(x))) .collect::<Result<Vec<_>>>()?; let (train_images, train_labels): (Vec<_>, Vec<_>) = train_images_and_labels.into_iter().unzip(); Ok(Dataset { train_images: Tensor::cat(&train_images, 0)?, train_labels: Tensor::cat(&train_labels, 0)?, test_images, test_labels, labels: 10, }) } fn load_parquet(parquet: SerializedFileReader<std::fs::File>) -> Result<(Tensor, Tensor)> { let samples = parquet.metadata().file_metadata().num_rows() as usize; let mut buffer_images: Vec<u8> = Vec::with_capacity(samples * 1_024); let mut buffer_labels: Vec<u8> = Vec::with_capacity(samples); for row in parquet.into_iter().flatten() { for (_name, field) in row.get_column_iter() { if let parquet::record::Field::Group(subrow) = field { for (_name, field) in subrow.get_column_iter() { if let parquet::record::Field::Bytes(value) = field { let image = image::load_from_memory(value.data()).unwrap(); buffer_images.extend(image.to_rgb8().as_raw()); } } } else if let parquet::record::Field::Long(label) = field { buffer_labels.push(*label as u8); } } } let images = (Tensor::from_vec(buffer_images, (samples, 3, 32, 32), &Device::Cpu)? .to_dtype(DType::U8)? 
/ 255.)?; let labels = Tensor::from_vec(buffer_labels, (samples,), &Device::Cpu)?; Ok((images, labels)) } pub fn load() -> Result<Dataset> { let api = Api::new().map_err(|e| Error::Msg(format!("Api error: {e}")))?; let dataset_id = "cifar10".to_string(); let repo = Repo::with_revision( dataset_id, RepoType::Dataset, "refs/convert/parquet".to_string(), ); let repo = api.repo(repo); let test_parquet_filename = repo .get("plain_text/test/0000.parquet") .map_err(|e| Error::Msg(format!("Api error: {e}")))?; let train_parquet_filename = repo .get("plain_text/train/0000.parquet") .map_err(|e| Error::Msg(format!("Api error: {e}")))?; let test_parquet = SerializedFileReader::new(std::fs::File::open(test_parquet_filename)?) .map_err(|e| Error::Msg(format!("Parquet error: {e}")))?; let train_parquet = SerializedFileReader::new(std::fs::File::open(train_parquet_filename)?) .map_err(|e| Error::Msg(format!("Parquet error: {e}")))?; let (test_images, test_labels) = load_parquet(test_parquet)?; let (train_images, train_labels) = load_parquet(train_parquet)?; Ok(crate::vision::Dataset { train_images, train_labels, test_images, test_labels, labels: 10, }) }
candle/candle-datasets/src/vision/cifar.rs/0
{ "file_path": "candle/candle-datasets/src/vision/cifar.rs", "repo_id": "candle", "token_count": 2139 }
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use anyhow::{Error as E, Result}; use clap::Parser; use candle_transformers::models::chatglm::{Config, Model}; use candle::{DType, Device, Tensor}; use candle_nn::VarBuilder; use candle_transformers::generation::LogitsProcessor; use hf_hub::{api::sync::Api, Repo, RepoType}; use tokenizers::Tokenizer; struct TextGeneration { model: Model, device: Device, tokenizer: Tokenizer, logits_processor: LogitsProcessor, repeat_penalty: f32, repeat_last_n: usize, verbose_prompt: bool, } impl TextGeneration { #[allow(clippy::too_many_arguments)] fn new( model: Model, tokenizer: Tokenizer, seed: u64, temp: Option<f64>, top_p: Option<f64>, repeat_penalty: f32, repeat_last_n: usize, verbose_prompt: bool, device: &Device, ) -> Self { let logits_processor = LogitsProcessor::new(seed, temp, top_p); Self { model, tokenizer, logits_processor, repeat_penalty, repeat_last_n, verbose_prompt, device: device.clone(), } } fn run(&mut self, prompt: &str, sample_len: usize) -> Result<()> { use std::io::Write; println!("starting the inference loop"); let tokens = self.tokenizer.encode(prompt, true).map_err(E::msg)?; if tokens.is_empty() { anyhow::bail!("Empty prompts are not supported in the chatglm model.") } if self.verbose_prompt { for (token, id) in tokens.get_tokens().iter().zip(tokens.get_ids().iter()) { let token = token.replace('▁', " ").replace("<0x0A>", "\n"); println!("{id:7} -> '{token}'"); } } let mut tokens = tokens.get_ids().to_vec(); let mut generated_tokens = 0usize; let eos_token = match self.tokenizer.get_vocab(true).get("</s>") { Some(token) => *token, None => anyhow::bail!("cannot find the endoftext token"), }; print!("{prompt}"); std::io::stdout().flush()?; let start_gen = std::time::Instant::now(); for index in 0..sample_len { let context_size = if index > 0 { 1 } else { tokens.len() }; let ctxt = &tokens[tokens.len().saturating_sub(context_size)..]; let input = Tensor::new(ctxt, &self.device)?.unsqueeze(0)?; let logits = self.model.forward(&input)?; let logits = logits.squeeze(0)?.to_dtype(DType::F32)?; let logits = if self.repeat_penalty == 1. { logits } else { let start_at = tokens.len().saturating_sub(self.repeat_last_n); candle_transformers::utils::apply_repeat_penalty( &logits, self.repeat_penalty, &tokens[start_at..], )? }; let next_token = self.logits_processor.sample(&logits)?; tokens.push(next_token); generated_tokens += 1; if next_token == eos_token { break; } let token = self.tokenizer.decode(&[next_token], true).map_err(E::msg)?; print!("{token}"); std::io::stdout().flush()?; } let dt = start_gen.elapsed(); println!( "\n{generated_tokens} tokens generated ({:.2} token/s)", generated_tokens as f64 / dt.as_secs_f64(), ); Ok(()) } } #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] struct Args { /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, /// Enable tracing (generates a trace-timestamp.json file). #[arg(long)] tracing: bool, /// Display the token for the specified prompt. #[arg(long)] verbose_prompt: bool, #[arg(long)] prompt: String, /// The temperature used to generate samples. #[arg(long)] temperature: Option<f64>, /// Nucleus sampling probability cutoff. #[arg(long)] top_p: Option<f64>, /// The seed to use when generating random samples. #[arg(long, default_value_t = 299792458)] seed: u64, /// The length of the sample to generate (in tokens). 
#[arg(long, short = 'n', default_value_t = 5000)] sample_len: usize, #[arg(long)] model_id: Option<String>, #[arg(long)] revision: Option<String>, #[arg(long)] weight_file: Option<String>, #[arg(long)] tokenizer: Option<String>, /// Penalty to be applied for repeating tokens, 1. means no penalty. #[arg(long, default_value_t = 1.1)] repeat_penalty: f32, /// The context size to consider for the repeat penalty. #[arg(long, default_value_t = 64)] repeat_last_n: usize, } fn main() -> Result<()> { use tracing_chrome::ChromeLayerBuilder; use tracing_subscriber::prelude::*; let args = Args::parse(); let _guard = if args.tracing { let (chrome_layer, guard) = ChromeLayerBuilder::new().build(); tracing_subscriber::registry().with(chrome_layer).init(); Some(guard) } else { None }; println!( "avx: {}, neon: {}, simd128: {}, f16c: {}", candle::utils::with_avx(), candle::utils::with_neon(), candle::utils::with_simd128(), candle::utils::with_f16c() ); println!( "temp: {:.2} repeat-penalty: {:.2} repeat-last-n: {}", args.temperature.unwrap_or(0.), args.repeat_penalty, args.repeat_last_n ); let start = std::time::Instant::now(); let api = Api::new()?; let model_id = match args.model_id { Some(model_id) => model_id.to_string(), None => "THUDM/chatglm3-6b".to_string(), }; let revision = match args.revision { Some(rev) => rev.to_string(), None => "main".to_string(), }; let repo = api.repo(Repo::with_revision(model_id, RepoType::Model, revision)); let tokenizer_filename = match args.tokenizer { Some(file) => std::path::PathBuf::from(file), None => api .model("lmz/candle-chatglm".to_string()) .get("chatglm-tokenizer.json")?, }; let filenames = match args.weight_file { Some(weight_file) => vec![std::path::PathBuf::from(weight_file)], None => candle_examples::hub_load_safetensors(&repo, "model.safetensors.index.json")?, }; println!("retrieved the files in {:?}", start.elapsed()); let tokenizer = Tokenizer::from_file(tokenizer_filename).map_err(E::msg)?; let start = std::time::Instant::now(); let config = Config::glm3_6b(); let device = candle_examples::device(args.cpu)?; let vb = unsafe { VarBuilder::from_mmaped_safetensors(&filenames, DType::F32, &device)? }; let model = Model::new(&config, vb)?; println!("loaded the model in {:?}", start.elapsed()); let mut pipeline = TextGeneration::new( model, tokenizer, args.seed, args.temperature, args.top_p, args.repeat_penalty, args.repeat_last_n, args.verbose_prompt, &device, ); pipeline.run(&args.prompt, args.sample_len)?; Ok(()) }
candle/candle-examples/examples/chatglm/main.rs/0
{ "file_path": "candle/candle-examples/examples/chatglm/main.rs", "repo_id": "candle", "token_count": 3423 }
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use std::fmt::Display; use std::path::PathBuf; use anyhow::bail; use anyhow::{Error as E, Result}; use candle::{Device, Tensor}; use candle_nn::ops::softmax; use candle_nn::VarBuilder; use candle_transformers::models::debertav2::{Config as DebertaV2Config, DebertaV2NERModel}; use candle_transformers::models::debertav2::{DebertaV2SeqClassificationModel, Id2Label}; use candle_transformers::models::debertav2::{NERItem, TextClassificationItem}; use clap::{ArgGroup, Parser, ValueEnum}; use hf_hub::{api::sync::Api, Repo, RepoType}; use tokenizers::{Encoding, PaddingParams, Tokenizer}; enum TaskType { Ner(DebertaV2NERModel), TextClassification(DebertaV2SeqClassificationModel), } #[derive(Parser, Debug, Clone, ValueEnum)] enum ArgsTask { /// Named Entity Recognition Ner, /// Text Classification TextClassification, } impl Display for ArgsTask { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { match self { ArgsTask::Ner => write!(f, "ner"), ArgsTask::TextClassification => write!(f, "text-classification"), } } } #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] #[command(group(ArgGroup::new("model") .required(true) .args(&["model_id", "model_path"])))] struct Args { /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, /// Enable tracing (generates a trace-timestamp.json file). #[arg(long)] tracing: bool, /// The model id to use from HuggingFace #[arg(long, requires_if("model_id", "revision"))] model_id: Option<String>, /// Revision of the model to use (default: "main") #[arg(long, default_value = "main")] revision: String, /// Specify a sentence to inference. Specify multiple times to inference multiple sentences. #[arg(long = "sentence", name="sentences", num_args = 1..)] sentences: Vec<String>, /// Use the pytorch weights rather than the by-default safetensors #[arg(long)] use_pth: bool, /// Perform a very basic benchmark on inferencing, using N number of iterations #[arg(long)] benchmark_iters: Option<usize>, /// Which task to run #[arg(long, default_value_t = ArgsTask::Ner)] task: ArgsTask, /// Use model from a specific directory instead of HuggingFace local cache. /// Using this ignores model_id and revision args. #[arg(long)] model_path: Option<PathBuf>, /// Pass in an Id2Label if the model config does not provide it, in JSON format. Example: --id2label='{"0": "True", "1": "False"}' #[arg(long)] id2label: Option<String>, } impl Args { fn build_model_and_tokenizer( &self, ) -> Result<(TaskType, DebertaV2Config, Tokenizer, Id2Label)> { let device = candle_examples::device(self.cpu)?; // Get files from either the HuggingFace API, or from a specified local directory. let (config_filename, tokenizer_filename, weights_filename) = { match &self.model_path { Some(base_path) => { if !base_path.is_dir() { bail!("Model path {} is not a directory.", base_path.display()) } let config = base_path.join("config.json"); let tokenizer = base_path.join("tokenizer.json"); let weights = if self.use_pth { base_path.join("pytorch_model.bin") } else { base_path.join("model.safetensors") }; (config, tokenizer, weights) } None => { let repo = Repo::with_revision( self.model_id.as_ref().unwrap().clone(), RepoType::Model, self.revision.clone(), ); let api = Api::new()?; let api = api.repo(repo); let config = api.get("config.json")?; let tokenizer = api.get("tokenizer.json")?; let weights = if self.use_pth { api.get("pytorch_model.bin")? 
} else { api.get("model.safetensors")? }; (config, tokenizer, weights) } } }; let config = std::fs::read_to_string(config_filename)?; let config: DebertaV2Config = serde_json::from_str(&config)?; // Command-line id2label takes precedence. Otherwise, use model config's id2label. // If neither is specified, then we can't proceed. let id2label = if let Some(id2labelstr) = &self.id2label { serde_json::from_str(id2labelstr.as_str())? } else if let Some(id2label) = &config.id2label { id2label.clone() } else { bail!("Id2Label not found in the model configuration nor specified as a parameter") }; let mut tokenizer = Tokenizer::from_file(tokenizer_filename) .map_err(|e| candle::Error::Msg(format!("Tokenizer error: {e}")))?; tokenizer.with_padding(Some(PaddingParams::default())); let vb = if self.use_pth { VarBuilder::from_pth( &weights_filename, candle_transformers::models::debertav2::DTYPE, &device, )? } else { unsafe { VarBuilder::from_mmaped_safetensors( &[weights_filename], candle_transformers::models::debertav2::DTYPE, &device, )? } }; let vb = vb.set_prefix("deberta"); match self.task { ArgsTask::Ner => Ok(( TaskType::Ner(DebertaV2NERModel::load( vb, &config, Some(id2label.clone()), )?), config, tokenizer, id2label, )), ArgsTask::TextClassification => Ok(( TaskType::TextClassification(DebertaV2SeqClassificationModel::load( vb, &config, Some(id2label.clone()), )?), config, tokenizer, id2label, )), } } } fn get_device(model_type: &TaskType) -> &Device { match model_type { TaskType::Ner(ner_model) => &ner_model.device, TaskType::TextClassification(classification_model) => &classification_model.device, } } struct ModelInput { encoding: Vec<Encoding>, input_ids: Tensor, attention_mask: Tensor, token_type_ids: Tensor, } fn main() -> Result<()> { use tracing_chrome::ChromeLayerBuilder; use tracing_subscriber::prelude::*; let args = Args::parse(); let _guard = if args.tracing { let (chrome_layer, guard) = ChromeLayerBuilder::new().build(); tracing_subscriber::registry().with(chrome_layer).init(); Some(guard) } else { None }; let model_load_time = std::time::Instant::now(); let (task_type, _model_config, tokenizer, id2label) = args.build_model_and_tokenizer()?; println!( "Loaded model and tokenizers in {:?}", model_load_time.elapsed() ); let device = get_device(&task_type); let tokenize_time = std::time::Instant::now(); let model_input: ModelInput = { let tokenizer_encodings = tokenizer .encode_batch(args.sentences, true) .map_err(E::msg)?; let mut encoding_stack: Vec<Tensor> = Vec::default(); let mut attention_mask_stack: Vec<Tensor> = Vec::default(); let mut token_type_id_stack: Vec<Tensor> = Vec::default(); for encoding in &tokenizer_encodings { encoding_stack.push(Tensor::new(encoding.get_ids(), device)?); attention_mask_stack.push(Tensor::new(encoding.get_attention_mask(), device)?); token_type_id_stack.push(Tensor::new(encoding.get_type_ids(), device)?); } ModelInput { encoding: tokenizer_encodings, input_ids: Tensor::stack(&encoding_stack[..], 0)?, attention_mask: Tensor::stack(&attention_mask_stack[..], 0)?, token_type_ids: Tensor::stack(&token_type_id_stack[..], 0)?, } }; println!( "Tokenized and loaded inputs in {:?}", tokenize_time.elapsed() ); match task_type { TaskType::Ner(ner_model) => { if let Some(num_iters) = args.benchmark_iters { create_benchmark(num_iters, model_input)( |input_ids, token_type_ids, attention_mask| { ner_model.forward(input_ids, Some(token_type_ids), Some(attention_mask))?; Ok(()) }, )?; std::process::exit(0); } let inference_time = std::time::Instant::now(); let 
logits = ner_model.forward( &model_input.input_ids, Some(model_input.token_type_ids), Some(model_input.attention_mask), )?; println!("Inferenced inputs in {:?}", inference_time.elapsed()); let max_scores_vec = softmax(&logits, 2)?.max(2)?.to_vec2::<f32>()?; let max_indices_vec: Vec<Vec<u32>> = logits.argmax(2)?.to_vec2()?; let input_ids = model_input.input_ids.to_vec2::<u32>()?; let mut results: Vec<Vec<NERItem>> = Default::default(); for (input_row_idx, input_id_row) in input_ids.iter().enumerate() { let mut current_row_result: Vec<NERItem> = Default::default(); let current_row_encoding = model_input.encoding.get(input_row_idx).unwrap(); let current_row_tokens = current_row_encoding.get_tokens(); let current_row_max_scores = max_scores_vec.get(input_row_idx).unwrap(); for (input_id_idx, _input_id) in input_id_row.iter().enumerate() { // Do not include special characters in output if current_row_encoding.get_special_tokens_mask()[input_id_idx] == 1 { continue; } let max_label_idx = max_indices_vec .get(input_row_idx) .unwrap() .get(input_id_idx) .unwrap(); let label = id2label.get(max_label_idx).unwrap().clone(); // Do not include those labeled as "O" ("Other") if label == "O" { continue; } current_row_result.push(NERItem { entity: label, word: current_row_tokens[input_id_idx].clone(), score: current_row_max_scores[input_id_idx], start: current_row_encoding.get_offsets()[input_id_idx].0, end: current_row_encoding.get_offsets()[input_id_idx].1, index: input_id_idx, }); } results.push(current_row_result); } println!("\n{:?}", results); } TaskType::TextClassification(classification_model) => { let inference_time = std::time::Instant::now(); let logits = classification_model.forward( &model_input.input_ids, Some(model_input.token_type_ids), Some(model_input.attention_mask), )?; println!("Inferenced inputs in {:?}", inference_time.elapsed()); let predictions = logits.argmax(1)?.to_vec1::<u32>()?; let scores = softmax(&logits, 1)?.max(1)?.to_vec1::<f32>()?; let mut results = Vec::<TextClassificationItem>::default(); for (idx, prediction) in predictions.iter().enumerate() { results.push(TextClassificationItem { label: id2label[prediction].clone(), score: scores[idx], }); } println!("\n{:?}", results); } } Ok(()) } fn create_benchmark<F>( num_iters: usize, model_input: ModelInput, ) -> impl Fn(F) -> Result<(), candle::Error> where F: Fn(&Tensor, Tensor, Tensor) -> Result<(), candle::Error>, { move |code: F| -> Result<(), candle::Error> { println!("Running {num_iters} iterations..."); let mut durations = Vec::with_capacity(num_iters); for _ in 0..num_iters { let token_type_ids = model_input.token_type_ids.clone(); let attention_mask = model_input.attention_mask.clone(); let start = std::time::Instant::now(); code(&model_input.input_ids, token_type_ids, attention_mask)?; let duration = start.elapsed(); durations.push(duration.as_nanos()); } let min_time = *durations.iter().min().unwrap(); let max_time = *durations.iter().max().unwrap(); let avg_time = durations.iter().sum::<u128>() as f64 / num_iters as f64; println!("Min time: {:.3} ms", min_time as f64 / 1_000_000.0); println!("Avg time: {:.3} ms", avg_time / 1_000_000.0); println!("Max time: {:.3} ms", max_time as f64 / 1_000_000.0); Ok(()) } }
candle/candle-examples/examples/debertav2/main.rs/0
{ "file_path": "candle/candle-examples/examples/debertav2/main.rs", "repo_id": "candle", "token_count": 6817 }
use std::cmp::min; use candle::{bail, DType, Device, Result, Tensor}; use candle_transformers::models::llava::{ config::{HFPreProcessorConfig, LLaVAConfig}, utils::select_best_resolution, }; use hf_hub::api::sync::Api; use image::{imageops::overlay, DynamicImage, GenericImageView, Rgb, RgbImage}; use serde::{Deserialize, Serialize}; //This struct is mainly for LLaVA aplications, hence it's not completely compatible with python transformer CLIPImageProcessor few several preprocess that LLaVA used, including "openai/clip-vit-large-patch14-336" and "openai/clip-vit-large-patch14". #[derive(Serialize, Deserialize, Debug)] pub struct ImageProcessor { #[serde(default = "default_size")] pub size: u32, // this is not the same as python transformer #[serde(default = "default_do_resize")] pub do_resize: bool, //resample: u32 // 3 for PIL bicubic, equivalent to rust CatmullRom. Hence below we use CatmullRom #[serde(default = "default_do_center_crop")] pub do_center_crop: bool, #[serde(default = "default_crop_size")] pub crop_size: u32, // this is not the same as python transformer #[serde(default = "default_do_rescale")] pub do_rescale: bool, #[serde(default = "default_rescale_factor")] pub rescale_factor: f32, #[serde(default = "default_do_normalize")] pub do_normalize: bool, #[serde(default = "default_image_mean")] pub image_mean: Vec<f32>, #[serde(default = "default_image_std")] pub image_std: Vec<f32>, } fn default_size() -> u32 { 224 } fn default_do_resize() -> bool { true } fn default_do_center_crop() -> bool { true } fn default_crop_size() -> u32 { 224 } fn default_do_rescale() -> bool { true } fn default_rescale_factor() -> f32 { 1.0 / 255.0 } fn default_do_normalize() -> bool { true } fn default_image_mean() -> Vec<f32> { vec![0.48145466, 0.4578275, 0.40821073] } fn default_image_std() -> Vec<f32> { vec![0.26862954, 0.2613026, 0.2757771] } impl ImageProcessor { pub fn from_pretrained(clip_id: &str) -> Result<Self> { let api = Api::new().map_err(|e| candle::Error::Msg(e.to_string()))?; let api = api.model(clip_id.to_string()); let config_filename = api .get("preprocessor_config.json") .map_err(|e| candle::Error::Msg(e.to_string()))?; let image_processor = serde_json::from_slice(&std::fs::read(config_filename).map_err(candle::Error::Io)?) 
.map_err(|e| candle::Error::Msg(e.to_string()))?; Ok(image_processor) } pub fn from_hf_preprocessor_config(hf_preprocessor_config: &HFPreProcessorConfig) -> Self { Self { size: hf_preprocessor_config.size["shortest_edge"] as u32, do_resize: hf_preprocessor_config.do_resize, do_center_crop: hf_preprocessor_config.do_center_crop, crop_size: hf_preprocessor_config.crop_size["height"] as u32, do_rescale: hf_preprocessor_config.do_rescale, rescale_factor: hf_preprocessor_config.rescale_factor, do_normalize: hf_preprocessor_config.do_normalize, image_mean: hf_preprocessor_config.image_mean.clone(), image_std: hf_preprocessor_config.image_std.clone(), } } ///shortest edge to self.resize, other edge is resized to maintain aspect ratio pub fn resize(&self, image: &DynamicImage) -> DynamicImage { let (width, height) = image.dimensions(); let size = self.size; if width == size && height == size { image.clone() } else { let (new_width, new_height) = if width < height { ( size, (((size * height) as f32) / width as f32).ceil() as u32, ) } else { ( (((size * width) as f32) / height as f32).ceil() as u32, size, ) }; image.resize( new_width, new_height, image::imageops::FilterType::CatmullRom, ) } } pub fn center_crop(&self, image: &DynamicImage) -> DynamicImage { let (width, height) = image.dimensions(); let crop_size = self.crop_size; let (left, top) = calculate_middle((width, height), (crop_size, crop_size)); image.crop_imm(left, top, crop_size, crop_size) } pub fn to_tensor(&self, image: &DynamicImage) -> Result<Tensor> { let img = image.to_rgb8().into_raw(); let (width, height) = image.dimensions(); Tensor::from_vec(img, (height as usize, width as usize, 3), &Device::Cpu)? .to_dtype(DType::F32) // only for internal compute } pub fn rescale(&self, tensor: &Tensor) -> Result<Tensor> { let rescale_factor = self.rescale_factor as f64; tensor.affine(rescale_factor, 0.0) } pub fn normalize(&self, tensor: &Tensor) -> Result<Tensor> { let image_mean = self.image_mean.clone(); let image_std = self.image_std.clone(); let mean = Tensor::from_vec(image_mean, (3,), &Device::Cpu)?; let std = Tensor::from_vec(image_std, (3,), &Device::Cpu)?; tensor.broadcast_sub(&mean)?.broadcast_div(&std) } pub fn to_channel_dimension_format(&self, tensor: &Tensor) -> Result<Tensor> { tensor.permute((2, 0, 1)) } pub fn preprocess(&self, image: &DynamicImage) -> Result<Tensor> { let image = if self.do_resize { self.resize(image) } else { image.clone() }; let image = if self.do_center_crop { self.center_crop(&image) } else { image }; let tensor = self.to_tensor(&image)?; let tensor = if self.do_rescale { self.rescale(&tensor)? } else { tensor }; let tensor = if self.do_normalize { self.normalize(&tensor)? 
} else { tensor }; self.to_channel_dimension_format(&tensor) } } pub fn calculate_middle(image_size: (u32, u32), center_size: (u32, u32)) -> (u32, u32) { let (width, height) = image_size; let (center_width, center_height) = center_size; let left = if width <= center_width { 0 } else { ((width as f32 - center_width as f32) / 2.0).ceil() as u32 }; let top = if height <= center_height { 0 } else { ((height as f32 - center_height as f32) / 2.0).ceil() as u32 }; (left, top) } pub fn process_image( image: &DynamicImage, processor: &ImageProcessor, llava_config: &LLaVAConfig, ) -> candle::Result<Tensor> { if llava_config.image_aspect_ratio == *"square" { processor.preprocess(image)?.unsqueeze(0) } else if llava_config.image_aspect_ratio == *"anyres" { process_anyres_image(image, processor, &llava_config.image_grid_pinpoints) } else if llava_config.image_aspect_ratio == *"pad" { process_pad_image(image, processor) } else { bail!("Invalid image aspect ratio") } } fn process_pad_image(image: &DynamicImage, processor: &ImageProcessor) -> Result<Tensor> { let mean_color = processor .image_mean .iter() .map(|x| ((*x) * 255.0) as u8) .collect::<Vec<u8>>(); let mean_color = Rgb::from([mean_color[0], mean_color[1], mean_color[2]]); let image_padded = expand2square(image, mean_color); processor.preprocess(&image_padded) } fn process_anyres_image( image: &DynamicImage, processor: &ImageProcessor, grid_pinpoints: &[(u32, u32)], ) -> Result<Tensor> { let original_size = image.dimensions(); let best_resolution = select_best_resolution(original_size, grid_pinpoints); let image_padded = resize_and_pad_image(image, best_resolution); let image_original_resize = image.resize_exact( processor.size, processor.size, image::imageops::FilterType::CatmullRom, ); let mut patches = vec![image_original_resize]; for patch in divide_to_patches(&image_padded, processor.crop_size) { patches.push(patch); } let tensors = patches .iter() .map(|patch| processor.preprocess(patch)) .collect::<Result<Vec<Tensor>>>()?; Tensor::stack(&tensors, 0) } fn expand2square(image: &DynamicImage, background_color: Rgb<u8>) -> DynamicImage { let (width, height) = image.dimensions(); match width.cmp(&height) { std::cmp::Ordering::Less => { let mut new_image = DynamicImage::from(RgbImage::from_pixel(height, height, background_color)); overlay(&mut new_image, image, ((height - width) / 2) as i64, 0); new_image } std::cmp::Ordering::Equal => image.clone(), std::cmp::Ordering::Greater => { let mut new_image = DynamicImage::from(RgbImage::from_pixel(width, width, background_color)); overlay(&mut new_image, image, 0, ((width - height) / 2) as i64); new_image } } } fn resize_and_pad_image(image: &DynamicImage, target_resolution: (u32, u32)) -> DynamicImage { let (original_width, original_height) = image.dimensions(); let original_width_f = original_width as f32; let original_height_f = original_height as f32; let (target_width, target_height) = target_resolution; let target_width_f = target_width as f32; let target_height_f = target_height as f32; let scale_w = target_width_f / original_width_f; let scale_h = target_height_f / original_height_f; let (new_width, new_height) = if scale_w < scale_h { ( target_width, min((original_height_f * scale_w).ceil() as u32, target_height), ) } else { ( min((original_width_f * scale_h).ceil() as u32, target_width), target_height, ) }; let resized_image = image.resize_exact( new_width, new_height, image::imageops::FilterType::CatmullRom, ); let mut new_image = DynamicImage::new_rgb8(target_width, target_height); let 
(paste_x, paste_y) = calculate_middle((target_width, target_height), (new_width, new_height)); overlay( &mut new_image, &resized_image, paste_x.into(), paste_y.into(), ); new_image } fn divide_to_patches(image: &DynamicImage, patch_size: u32) -> Vec<DynamicImage> { let (width, height) = image.dimensions(); let mut patches = Vec::new(); for y in (0..height).step_by(patch_size as usize) { for x in (0..width).step_by(patch_size as usize) { let patch = image.crop_imm(x, y, patch_size, patch_size); patches.push(patch); } } patches }
candle/candle-examples/examples/llava/image_processor.rs/0
{ "file_path": "candle/candle-examples/examples/llava/image_processor.rs", "repo_id": "candle", "token_count": 4904 }
# candle-mistral: 7b LLM with Apache 2.0 licensed weights

Mistral-7B-v0.1 is a pretrained generative LLM with 7 billion parameters. It outperforms all the publicly available 13b models as of 2023-09-28. Weights (and the original Python model code) are released under the permissive Apache 2.0 license.

- [Blog post](https://mistral.ai/news/announcing-mistral-7b/) from Mistral announcing the model release.
- [Model card](https://huggingface.co/mistralai/Mistral-7B-v0.1) on the HuggingFace Hub.

This example supports the initial model as well as a quantized variant.

## Running the example

```bash
$ cargo run --example mistral --release --features cuda -- --prompt 'Write helloworld code in Rust' --sample-len 150
Generated text:
Write helloworld code in Rust
=============================

This is a simple example of how to write "Hello, world!" program in Rust.

## Compile and run

``bash
$ cargo build --release
   Compiling hello-world v0.1.0 (/home/user/rust/hello-world)
    Finished release [optimized] target(s) in 0.26s
$ ./target/release/hello-world
Hello, world!
``

## Source code

``rust
fn main() {
    println!("Hello, world!");
}
``

## License

This example is released under the terms
```

## Running the quantized version of the model

```bash
$ cargo run --example mistral --features accelerate --release -- \
$ --prompt "Here is a sample quick sort implementation in rust " --quantized -n 400
avx: false, neon: true, simd128: false, f16c: false
temp: 0.00 repeat-penalty: 1.10 repeat-last-n: 64
retrieved the files in 562.292µs
loaded the model in 1.100323667s
Here is a sample quick sort implementation in rust

``rust
fn quick_sort(arr: &mut [i32]) {
    if arr.len() <= 1 {
        return;
    }

    let pivot = arr[0];
    let mut left = vec![];
    let mut right = vec![];

    for i in 1..arr.len() {
        if arr[i] < pivot {
            left.push(arr[i]);
        } else {
            right.push(arr[i]);
        }
    }

    quick_sort(&mut left);
    quick_sort(&mut right);

    let mut i = 0;
    for _ in &left {
        arr[i] = left.pop().unwrap();
        i += 1;
    }

    for _ in &right {
        arr[i] = right.pop().unwrap();
        i += 1;
    }
}
``
226 tokens generated (10.91 token/s)
```
candle/candle-examples/examples/mistral/README.md/0
{ "file_path": "candle/candle-examples/examples/mistral/README.md", "repo_id": "candle", "token_count": 829 }
use candle::{DType, Device, Result, Tensor, D}; use candle_nn::{ embedding, layer_norm, linear_no_bias, Activation, Embedding, LayerNorm, Linear, Module, VarBuilder, }; use candle_transformers::models::{encodec, t5}; // https://github.com/huggingface/transformers/blob/cd4584e3c809bb9e1392ccd3fe38b40daba5519a/src/transformers/models/musicgen/configuration_musicgen.py#L83 #[derive(Debug, Clone, PartialEq)] pub struct Config { vocab_size: usize, max_position_embeddings: usize, num_hidden_layers: usize, ffn_dim: usize, num_attention_heads: usize, layerdrop: f64, use_cache: bool, activation_function: Activation, hidden_size: usize, dropout: f64, attention_dropout: f64, activation_dropout: f64, initializer_factor: f64, scale_embedding: bool, num_codebooks: usize, pad_token_id: usize, bos_token_id: usize, eos_token_id: Option<usize>, tie_word_embeddings: bool, } impl Default for Config { fn default() -> Self { Self { vocab_size: 2048, max_position_embeddings: 2048, num_hidden_layers: 24, ffn_dim: 4096, num_attention_heads: 16, layerdrop: 0.0, use_cache: true, activation_function: Activation::Gelu, hidden_size: 1024, dropout: 0.1, attention_dropout: 0.0, activation_dropout: 0.0, initializer_factor: 0.02, scale_embedding: false, num_codebooks: 4, pad_token_id: 2048, bos_token_id: 2048, eos_token_id: None, tie_word_embeddings: false, } } } impl Config { fn musicgen_small() -> Self { Self { vocab_size: 2048, max_position_embeddings: 2048, num_hidden_layers: 24, ffn_dim: 4096, num_attention_heads: 16, layerdrop: 0.0, use_cache: true, activation_function: Activation::Gelu, hidden_size: 1024, dropout: 0.1, attention_dropout: 0.0, activation_dropout: 0.0, initializer_factor: 0.02, scale_embedding: false, num_codebooks: 4, pad_token_id: 2048, bos_token_id: 2048, eos_token_id: None, tie_word_embeddings: false, } } } fn get_embedding(num_embeddings: usize, embedding_dim: usize) -> Result<Tensor> { let half_dim = embedding_dim / 2; let emb = f64::ln(10000.) / (half_dim - 1) as f64; let xs: Vec<_> = (0..num_embeddings).map(|v| v as f32).collect(); let xs = Tensor::from_vec(xs, (num_embeddings, 1), &Device::Cpu)?; let ys: Vec<_> = (0..half_dim) .map(|v| f64::exp(v as f64 * -emb) as f32) .collect(); let ys = Tensor::from_vec(ys, (1, half_dim), &Device::Cpu)?; let shape = (num_embeddings, half_dim); let emb = (xs.broadcast_as(shape)? * ys.broadcast_as(shape)?)?; let emb = Tensor::cat(&[&emb.cos()?, &emb.sin()?], 1)?.reshape((num_embeddings, 2 * half_dim))?; let emb = if embedding_dim % 2 == 1 { let zeros = Tensor::zeros((num_embeddings, 1), DType::F32, &Device::Cpu)?; Tensor::cat(&[&emb, &zeros], 1)? } else { emb }; Ok(emb) } #[derive(Debug)] struct MusicgenSinusoidalPositionalEmbedding { num_positions: usize, embedding_dim: usize, weights: Tensor, } impl MusicgenSinusoidalPositionalEmbedding { fn load(_vb: VarBuilder, cfg: &Config) -> Result<Self> { let num_positions = cfg.max_position_embeddings; let embedding_dim = cfg.hidden_size; let weights = get_embedding(num_positions, embedding_dim)?; Ok(Self { num_positions, embedding_dim, weights, }) } fn forward(&mut self, input_ids: &Tensor) -> Result<Tensor> { let (_b_sz, _codebooks, seq_len) = input_ids.dims3()?; if seq_len > self.weights.dim(0)? { self.weights = get_embedding(seq_len, self.embedding_dim)? 
} self.weights.narrow(0, 0, seq_len) } } #[derive(Debug)] struct MusicgenAttention { scaling: f64, is_decoder: bool, num_heads: usize, head_dim: usize, k_proj: Linear, v_proj: Linear, q_proj: Linear, out_proj: Linear, } impl MusicgenAttention { fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { let h = cfg.hidden_size; let num_heads = cfg.num_attention_heads; let head_dim = h / num_heads; let k_proj = linear_no_bias(h, h, vb.pp("k_proj"))?; let v_proj = linear_no_bias(h, h, vb.pp("v_proj"))?; let q_proj = linear_no_bias(h, h, vb.pp("q_proj"))?; let out_proj = linear_no_bias(h, h, vb.pp("out_proj"))?; Ok(Self { scaling: 1. / (head_dim as f64).sqrt(), is_decoder: true, num_heads, head_dim, k_proj, v_proj, q_proj, out_proj, }) } fn forward( &mut self, xs: &Tensor, kv_states: Option<&Tensor>, attention_mask: &Tensor, ) -> Result<Tensor> { let (b_sz, tgt_len, _) = xs.dims3()?; let query_states = (self.q_proj.forward(xs)? * self.scaling)?; let kv_states = kv_states.unwrap_or(xs); let key_states = self.k_proj.forward(kv_states)?; let value_states = self.v_proj.forward(kv_states)?; let tgt = (b_sz, tgt_len, self.num_heads, self.head_dim); let query_states = query_states.reshape(tgt)?.transpose(1, 2)?.contiguous()?; let key_states = key_states.reshape(tgt)?.transpose(1, 2)?.contiguous()?; let value_states = value_states.reshape(tgt)?.transpose(1, 2)?.contiguous()?; let src_len = key_states.dim(1)?; let attn_weights = query_states.matmul(&key_states.transpose(1, 2)?)?; let attn_weights = attn_weights .reshape((b_sz, self.num_heads, tgt_len, src_len))? .broadcast_add(attention_mask)?; let attn_weights = candle_nn::ops::softmax(&attn_weights, D::Minus1)?; // TODO: layer_head_mask? let attn_output = attn_weights .matmul(&value_states)? .reshape((b_sz, self.num_heads, tgt_len, self.head_dim))? .transpose(1, 2)? .reshape((b_sz, tgt_len, self.num_heads * self.head_dim))?; let attn_output = self.out_proj.forward(&attn_output)?; Ok(attn_output) } } #[derive(Debug)] struct MusicgenDecoderLayer { self_attn: MusicgenAttention, self_attn_layer_norm: LayerNorm, encoder_attn: MusicgenAttention, encoder_attn_layer_norm: LayerNorm, fc1: Linear, fc2: Linear, final_layer_norm: LayerNorm, activation_fn: Activation, } impl MusicgenDecoderLayer { fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { let h = cfg.hidden_size; let self_attn = MusicgenAttention::load(vb.pp("self_attn"), cfg)?; let self_attn_layer_norm = layer_norm(h, 1e-5, vb.pp("self_attn_layer_norm"))?; let encoder_attn = MusicgenAttention::load(vb.pp("encoder_attn"), cfg)?; let encoder_attn_layer_norm = layer_norm(h, 1e-5, vb.pp("encoder_attn_layer_norm"))?; let fc1 = linear_no_bias(h, cfg.ffn_dim, vb.pp("fc1"))?; let fc2 = linear_no_bias(cfg.ffn_dim, h, vb.pp("fc2"))?; let final_layer_norm = layer_norm(h, 1e-5, vb.pp("final_layer_norm"))?; Ok(Self { self_attn, self_attn_layer_norm, encoder_attn, encoder_attn_layer_norm, fc1, fc2, final_layer_norm, activation_fn: cfg.activation_function, }) } fn forward( &mut self, xs: &Tensor, attention_mask: &Tensor, encoder_hidden_states: Option<&Tensor>, ) -> Result<Tensor> { let residual = xs.clone(); let xs = self.self_attn_layer_norm.forward(xs)?; let xs = self.self_attn.forward(&xs, None, attention_mask)?; let mut xs = (xs + residual)?; if let Some(encoder_hidden_states) = &encoder_hidden_states { let residual = xs.clone(); let encoder_attention_mask = attention_mask.clone(); // TODO xs = self.encoder_attn.forward( &xs, Some(encoder_hidden_states), &encoder_attention_mask, )?; xs = (xs + residual)? 
} let residual = xs.clone(); let xs = self.final_layer_norm.forward(&xs)?; let xs = self.fc1.forward(&xs)?; let xs = self.activation_fn.forward(&xs)?; let xs = self.fc2.forward(&xs)?; let xs = (xs + residual)?; Ok(xs) } } #[derive(Debug)] struct MusicgenDecoder { embed_tokens: Vec<Embedding>, embed_positions: MusicgenSinusoidalPositionalEmbedding, layers: Vec<MusicgenDecoderLayer>, layer_norm: LayerNorm, embed_scale: f64, num_codebooks: usize, d_model: usize, } impl MusicgenDecoder { fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { let h = cfg.hidden_size; let embed_scale = if cfg.scale_embedding { (h as f64).sqrt() } else { 1. }; let embed_dim = cfg.vocab_size + 1; let embed_tokens = (0..cfg.num_codebooks) .map(|i| embedding(embed_dim, h, vb.pp(format!("embed_tokens.{i}")))) .collect::<Result<Vec<_>>>()?; let embed_positions = MusicgenSinusoidalPositionalEmbedding::load(vb.clone(), cfg)?; let layers = (0..cfg.num_hidden_layers) .map(|i| MusicgenDecoderLayer::load(vb.pp(format!("layers.{i}")), cfg)) .collect::<Result<Vec<_>>>()?; let layer_norm = layer_norm(h, 1e-5, vb.pp("layer_norm"))?; Ok(Self { embed_tokens, embed_positions, layers, layer_norm, embed_scale, num_codebooks: cfg.num_codebooks, d_model: cfg.hidden_size, }) } fn prepare_decoder_attention_mask(&self, _b_sz: usize, _seq_len: usize) -> Result<Tensor> { todo!() } fn forward(&mut self, input_ids: &Tensor) -> Result<Tensor> { let dev = input_ids.device(); let (b_sz_times_codebooks, seq_len) = input_ids.dims2()?; let b_sz = b_sz_times_codebooks / self.num_codebooks; let input = input_ids.reshape((b_sz, self.num_codebooks, seq_len))?; let mut inputs_embeds = Tensor::zeros((b_sz, seq_len, self.d_model), DType::F32, dev)?; for (idx, codebook) in self.embed_tokens.iter().enumerate() { let inp = input.narrow(1, idx, 1)?.squeeze(1)?; inputs_embeds = (inputs_embeds + codebook.forward(&inp)?)? 
} let inputs_embeds = inputs_embeds; let positions = self.embed_positions.forward(&input)?.to_device(dev)?; let mut xs = inputs_embeds.broadcast_add(&positions)?; let attention_mask = self.prepare_decoder_attention_mask(b_sz, seq_len)?; for decoder_layer in self.layers.iter_mut() { xs = decoder_layer.forward(&xs, &attention_mask, None)?; } let xs = self.layer_norm.forward(&xs)?; Ok(xs) } } #[derive(Debug)] pub struct MusicgenForCausalLM { decoder: MusicgenDecoder, lm_heads: Vec<Linear>, num_codebooks: usize, vocab_size: usize, } impl MusicgenForCausalLM { pub fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { let h = cfg.hidden_size; let decoder = MusicgenDecoder::load(vb.pp("model.decoder"), cfg)?; let lm_heads = (0..cfg.num_codebooks) .map(|i| linear_no_bias(h, cfg.vocab_size, vb.pp(format!("lm_heads.{i}")))) .collect::<Result<Vec<_>>>()?; Ok(Self { decoder, lm_heads, num_codebooks: cfg.num_codebooks, vocab_size: cfg.vocab_size, }) } pub fn forward(&mut self, input_ids: &Tensor) -> Result<Tensor> { let (b_sz, seq_len) = input_ids.dims2()?; let hidden_states = self.decoder.forward(input_ids)?; let lm_logits = self .lm_heads .iter() .map(|h| h.forward(&hidden_states)) .collect::<Result<Vec<_>>>()?; let lm_logits = Tensor::stack(&lm_logits, 1)?.reshape(( b_sz * self.num_codebooks, seq_len, self.vocab_size, ))?; Ok(lm_logits) } } #[derive(Debug)] pub struct MusicgenForConditionalGeneration { pub text_encoder: t5::T5EncoderModel, pub audio_encoder: encodec::Model, pub decoder: MusicgenForCausalLM, cfg: GenConfig, } #[derive(Debug, Clone, PartialEq)] pub struct GenConfig { musicgen: Config, t5: t5::Config, encodec: encodec::Config, } impl GenConfig { pub fn small() -> Self { // https://huggingface.co/facebook/musicgen-small/blob/495da4ad086b3416a27c6187f9239f9fd96f3962/config.json#L6 let encodec = encodec::Config { audio_channels: 1, chunk_length_s: None, codebook_dim: Some(128), codebook_size: 2048, compress: 2, dilation_growth_rate: 2, hidden_size: 128, kernel_size: 7, last_kernel_size: 7, norm_type: encodec::NormType::WeightNorm, normalize: false, num_filters: 64, num_lstm_layers: 2, num_residual_layers: 1, overlap: None, // This should be Reflect and not Replicate but Reflect does not work yet. pad_mode: encodec::PadMode::Replicate, residual_kernel_size: 3, sampling_rate: 32_000, target_bandwidths: vec![2.2], trim_right_ratio: 1.0, upsampling_ratios: vec![8, 5, 4, 4], use_causal_conv: false, use_conv_shortcut: false, }; Self { musicgen: Config::musicgen_small(), t5: t5::Config::musicgen_small(), encodec, } } } impl MusicgenForConditionalGeneration { pub fn config(&self) -> &GenConfig { &self.cfg } pub fn load(vb: VarBuilder, cfg: GenConfig) -> Result<Self> { let text_encoder = t5::T5EncoderModel::load(vb.pp("text_encoder"), &cfg.t5)?; let audio_encoder = encodec::Model::new(&cfg.encodec, vb.pp("audio_encoder"))?; let decoder = MusicgenForCausalLM::load(vb.pp("decoder"), &cfg.musicgen)?; Ok(Self { text_encoder, audio_encoder, decoder, cfg, }) } }
candle/candle-examples/examples/musicgen/musicgen_model.rs/0
{ "file_path": "candle/candle-examples/examples/musicgen/musicgen_model.rs", "repo_id": "candle", "token_count": 7592 }
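`MusicgenDecoder::forward` above builds its input by summing one embedding table per codebook into a single hidden state. As a minimal, self-contained sketch of that accumulation with candle — using made-up sizes (2 codebooks, vocabulary 8, hidden 4) rather than the real MusicGen configuration:

```rust
use candle::{DType, Device, Module, Result, Tensor};
use candle_nn::{embedding, Embedding, VarBuilder, VarMap};

fn main() -> Result<()> {
    let dev = Device::Cpu;
    // Hypothetical sizes for illustration only, not the real MusicGen config.
    let (num_codebooks, vocab, hidden, b_sz, seq_len) = (2, 8, 4, 1, 3);
    let varmap = VarMap::new();
    let vb = VarBuilder::from_varmap(&varmap, DType::F32, &dev);
    // One embedding table per codebook, mirroring MusicgenDecoder::load.
    let tables = (0..num_codebooks)
        .map(|i| embedding(vocab, hidden, vb.pp(format!("embed_tokens.{i}"))))
        .collect::<Result<Vec<Embedding>>>()?;
    // Token ids arrive as (batch * num_codebooks, seq_len) and are reshaped.
    let input_ids = Tensor::zeros((b_sz * num_codebooks, seq_len), DType::U32, &dev)?;
    let input = input_ids.reshape((b_sz, num_codebooks, seq_len))?;
    // Sum the per-codebook embeddings into a single (batch, seq, hidden) tensor.
    let mut embeds = Tensor::zeros((b_sz, seq_len, hidden), DType::F32, &dev)?;
    for (idx, table) in tables.iter().enumerate() {
        let codes = input.narrow(1, idx, 1)?.squeeze(1)?;
        embeds = (embeds + table.forward(&codes)?)?;
    }
    println!("summed embeddings shape: {:?}", embeds.shape());
    Ok(())
}
```

The real decoder then adds the sinusoidal positional embeddings and runs the stack of decoder layers, as shown in the file above.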
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use anyhow::{Error as E, Result}; use clap::Parser; use candle_transformers::models::pixtral::{vision_model, Config, Model}; use candle::{DType, Device, Module, Tensor}; use candle_examples::token_output_stream::TokenOutputStream; use candle_nn::VarBuilder; use candle_transformers::generation::LogitsProcessor; use hf_hub::{api::sync::Api, Repo, RepoType}; use tokenizers::Tokenizer; struct TextGeneration { model: Model, image: Tensor, device: Device, tokenizer: TokenOutputStream, logits_processor: LogitsProcessor, repeat_penalty: f32, repeat_last_n: usize, } impl TextGeneration { #[allow(clippy::too_many_arguments)] fn new( model: Model, image: Tensor, tokenizer: Tokenizer, seed: u64, temp: Option<f64>, top_p: Option<f64>, repeat_penalty: f32, repeat_last_n: usize, device: &Device, ) -> Self { let logits_processor = LogitsProcessor::new(seed, temp, top_p); Self { model, image, tokenizer: TokenOutputStream::new(tokenizer), logits_processor, repeat_penalty, repeat_last_n, device: device.clone(), } } fn run(&mut self, prompt: &str, sample_len: usize) -> Result<()> { use std::io::Write; self.tokenizer.clear(); let mut tokens = self .tokenizer .tokenizer() .encode(prompt, true) .map_err(E::msg)? .get_ids() .to_vec(); let mut generated_tokens = 0usize; let get_token = |v| match self.tokenizer.get_token(v) { Some(token) => Ok(token), None => anyhow::bail!("cannot find the {v} token"), }; let bos_token = get_token("<s>")?; let eos_token = get_token("</s>")?; let inst_token = get_token("[INST]")?; let end_inst_token = get_token("[/INST]")?; let img_break = get_token("[IMG_BREAK]")?; let img_end = get_token("[IMG_END]")?; let start_gen = std::time::Instant::now(); for index in 0..sample_len { let logits = if index > 0 { let context_size = if index > 0 { 1 } else { tokens.len() }; let start_pos = tokens.len().saturating_sub(context_size); let ctxt = &tokens[start_pos..]; let input = Tensor::new(ctxt, &self.device)?.unsqueeze(0)?; self.model.lm_forward(&input)? } else { let (_b, _c, h, w) = self.image.dims4()?; let h = h / self.model.patch_size; let w = w / self.model.patch_size; let image_embeds = self.model.encode_image(&self.image)?; println!("generated image embeddings {image_embeds:?}"); let image_embeds = image_embeds.to_dtype(self.model.dtype)?; for &t in tokens.iter() { if let Some(t) = self.tokenizer.next_token(t)? { print!("{t}") } } std::io::stdout().flush()?; let break_embeds = { let input = Tensor::new(&[img_break], &self.device)?.unsqueeze(0)?; self.model.language_model.embed_tokens().forward(&input)? }; let start_embeds = { let mut in_tokens = vec![bos_token, inst_token]; in_tokens.extend_from_slice(tokens.as_slice()); let input = Tensor::new(in_tokens.as_slice(), &self.device)?.unsqueeze(0)?; self.model.language_model.embed_tokens().forward(&input)? }; let end_embeds = { let input = Tensor::new(&[img_end, end_inst_token], &self.device)?.unsqueeze(0)?; self.model.language_model.embed_tokens().forward(&input)? }; let mut input_embeds = vec![start_embeds]; for h_idx in 0..h { if h_idx > 0 { input_embeds.push(break_embeds.clone()) } let row = image_embeds.narrow(1, h_idx * w, w)?; input_embeds.push(row); } input_embeds.push(end_embeds); let input_embeds = Tensor::cat(&input_embeds, 1)?; self.model.lm_forward_embeds(&input_embeds)? }; let logits = logits.squeeze(0)?.squeeze(0)?.to_dtype(DType::F32)?; let logits = if self.repeat_penalty == 1. 
{ logits } else { let start_at = tokens.len().saturating_sub(self.repeat_last_n); candle_transformers::utils::apply_repeat_penalty( &logits, self.repeat_penalty, &tokens[start_at..], )? }; let next_token = self.logits_processor.sample(&logits)?; tokens.push(next_token); generated_tokens += 1; if next_token == eos_token { break; } if let Some(t) = self.tokenizer.next_token(next_token)? { print!("{t}"); std::io::stdout().flush()?; } } let dt = start_gen.elapsed(); if let Some(rest) = self.tokenizer.decode_rest().map_err(E::msg)? { print!("{rest}"); } std::io::stdout().flush()?; println!( "\n{generated_tokens} tokens generated ({:.2} token/s)", generated_tokens as f64 / dt.as_secs_f64(), ); Ok(()) } } #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] struct Args { /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, /// Enable tracing (generates a trace-timestamp.json file). #[arg(long)] tracing: bool, #[arg(long, default_value = "Describe the image.\n")] prompt: String, /// The temperature used to generate samples. #[arg(long)] temperature: Option<f64>, /// Nucleus sampling probability cutoff. #[arg(long)] top_p: Option<f64>, /// The seed to use when generating random samples. #[arg(long, default_value_t = 299792458)] seed: u64, /// The length of the sample to generate (in tokens). #[arg(long, short = 'n', default_value_t = 10000)] sample_len: usize, #[arg(long)] model_id: Option<String>, #[arg(long, default_value = "main")] revision: String, #[arg(long)] tokenizer_file: Option<String>, #[arg(long)] config_file: Option<String>, #[arg(long)] weight_files: Option<String>, /// Penalty to be applied for repeating tokens, 1. means no penalty. #[arg(long, default_value_t = 1.1)] repeat_penalty: f32, /// The context size to consider for the repeat penalty. 
#[arg(long, default_value_t = 64)] repeat_last_n: usize, #[arg(long)] image: String, #[arg(long)] vision_only: bool, } fn main() -> Result<()> { use tracing_chrome::ChromeLayerBuilder; use tracing_subscriber::prelude::*; let args = Args::parse(); let _guard = if args.tracing { let (chrome_layer, guard) = ChromeLayerBuilder::new().build(); tracing_subscriber::registry().with(chrome_layer).init(); Some(guard) } else { None }; println!( "avx: {}, neon: {}, simd128: {}, f16c: {}", candle::utils::with_avx(), candle::utils::with_neon(), candle::utils::with_simd128(), candle::utils::with_f16c() ); println!( "temp: {:.2} repeat-penalty: {:.2} repeat-last-n: {}", args.temperature.unwrap_or(0.), args.repeat_penalty, args.repeat_last_n ); let start = std::time::Instant::now(); let api = Api::new()?; let model_id = match &args.model_id { Some(model_id) => model_id.to_string(), None => "mistral-community/pixtral-12b".to_string(), }; let repo = api.repo(Repo::with_revision( model_id, RepoType::Model, args.revision, )); let tokenizer_filename = match args.tokenizer_file { Some(file) => std::path::PathBuf::from(file), None => repo.get("tokenizer.json")?, }; let filenames = match args.weight_files { Some(files) => files .split(',') .map(std::path::PathBuf::from) .collect::<Vec<_>>(), None => candle_examples::hub_load_safetensors(&repo, "model.safetensors.index.json")?, }; println!("retrieved the files in {:?}", start.elapsed()); let device = candle_examples::device(args.cpu)?; let dtype = if device.supports_bf16() && !args.vision_only { DType::BF16 } else { DType::F32 }; let config: Config = match args.config_file { Some(config_file) => serde_json::from_slice(&std::fs::read(config_file)?)?, None => { let config_file = repo.get("config.json")?; serde_json::from_slice(&std::fs::read(config_file)?)? } }; let image = if args.image.ends_with(".safetensors") { match candle::safetensors::load(&args.image, &device)?.remove("img") { None => anyhow::bail!("no img tensor in {}", args.image), Some(v) => v, } } else { candle_examples::imagenet::load_image_with_std_mean( &args.image, 1024, &[0.48145466, 0.4578275, 0.40821073], &[0.26862954, 0.261_302_6, 0.275_777_1], )? }; let image = image.to_device(&device)?.unsqueeze(0)?; println!("loaded image with shape {:?}", image); let vb = unsafe { VarBuilder::from_mmaped_safetensors(&filenames, dtype, &device)? }; if args.vision_only { let start = std::time::Instant::now(); let model = vision_model::Model::new(&config.vision_config, vb.pp("vision_tower"))?; println!("loaded the model in {:?}", start.elapsed()); let embs = model.forward(&image)?; println!("EMBS\n{embs}"); } else { let tokenizer = Tokenizer::from_file(tokenizer_filename).map_err(E::msg)?; let start = std::time::Instant::now(); let model = Model::new(&config, vb)?; println!("loaded the model in {:?}", start.elapsed()); let mut pipeline = TextGeneration::new( model, image, tokenizer, args.seed, args.temperature, args.top_p, args.repeat_penalty, args.repeat_last_n, &device, ); pipeline.run(&args.prompt, args.sample_len)?; } Ok(()) }
candle/candle-examples/examples/pixtral/main.rs/0
{ "file_path": "candle/candle-examples/examples/pixtral/main.rs", "repo_id": "candle", "token_count": 5495 }
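The generation loop above calls `candle_transformers::utils::apply_repeat_penalty` on the last `repeat_last_n` tokens. For readers who want the idea in isolation, here is an illustrative re-implementation of a CTRL-style repeat penalty over plain logits; it is a sketch of the general technique under that assumption, not the candle-transformers code itself, and the penalty/context values are made up:

```rust
use std::collections::HashSet;

/// Dampen logits of tokens that already appeared in the recent context:
/// positive logits are divided by the penalty, negative logits are multiplied
/// by it, so previously generated tokens become less likely to repeat.
fn repeat_penalty_sketch(logits: &mut [f32], penalty: f32, context: &[u32]) {
    let seen: HashSet<u32> = context.iter().copied().collect();
    for token in seen {
        if let Some(logit) = logits.get_mut(token as usize) {
            if *logit >= 0.0 {
                *logit /= penalty; // positive logits shrink towards zero
            } else {
                *logit *= penalty; // negative logits move further from zero
            }
        }
    }
}

fn main() {
    // Made-up logits and context; token ids 0 and 3 were generated recently.
    let mut logits = vec![2.0_f32, -1.0, 0.5, 3.0];
    repeat_penalty_sketch(&mut logits, 1.1, &[0, 3]);
    println!("{logits:?}");
}
```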
use std::collections::VecDeque;

use rand::distributions::Uniform;
use rand::{thread_rng, Rng};

use candle::{DType, Device, Module, Result, Tensor};
use candle_nn::loss::mse;
use candle_nn::{linear, seq, Activation, AdamW, Optimizer, VarBuilder, VarMap};

use crate::gym_env::GymEnv;

const DEVICE: Device = Device::Cpu;
const EPISODES: usize = 200;
const BATCH_SIZE: usize = 64;
const GAMMA: f64 = 0.99;
const LEARNING_RATE: f64 = 0.01;

pub fn run() -> Result<()> {
    let env = GymEnv::new("CartPole-v1")?;

    // Build the model that predicts the estimated rewards given a specific state.
    let var_map = VarMap::new();
    let vb = VarBuilder::from_varmap(&var_map, DType::F32, &DEVICE);
    let observation_space = *env.observation_space().first().unwrap();
    let model = seq()
        .add(linear(observation_space, 64, vb.pp("linear_in"))?)
        .add(Activation::Relu)
        .add(linear(64, env.action_space(), vb.pp("linear_out"))?);
    let mut optimizer = AdamW::new_lr(var_map.all_vars(), LEARNING_RATE)?;

    // Initialize the model's memory.
    let mut memory = VecDeque::with_capacity(10000);

    // Start the training loop.
    let mut state = env.reset(0)?;
    let mut episode = 0;
    let mut accumulate_rewards = 0.0;
    while episode < EPISODES {
        // Given the current state, predict the estimated rewards, and take the
        // action that is expected to return the most rewards.
        let estimated_rewards = model.forward(&state.unsqueeze(0)?)?;
        let action: u32 = estimated_rewards.squeeze(0)?.argmax(0)?.to_scalar()?;

        // Take that action in the environment, and memorize the outcome:
        // - the state for which the action was taken
        // - the action taken
        // - the new state resulting from taking that action
        // - the actual rewards of taking that action
        // - whether the environment reached a terminal state or not (e.g. game over)
        let step = env.step(action)?;
        accumulate_rewards += step.reward;
        memory.push_back((
            state,
            action,
            step.state.clone(),
            step.reward,
            step.terminated || step.truncated,
        ));
        state = step.state;

        // If there are enough entries in the memory, perform a learning step, where
        // BATCH_SIZE transitions will be sampled from the memory and will be
        // fed to the model so that it performs a backward pass.
        if memory.len() > BATCH_SIZE {
            // Sample randomly from the memory.
            let batch = thread_rng()
                .sample_iter(Uniform::from(0..memory.len()))
                .take(BATCH_SIZE)
                .map(|i| memory.get(i).unwrap().clone())
                .collect::<Vec<_>>();

            // Group all the samples together into tensors with the appropriate shape.
            let states: Vec<_> = batch.iter().map(|e| e.0.clone()).collect();
            let states = Tensor::stack(&states, 0)?;

            let actions = batch.iter().map(|e| e.1);
            let actions = Tensor::from_iter(actions, &DEVICE)?.unsqueeze(1)?;

            let next_states: Vec<_> = batch.iter().map(|e| e.2.clone()).collect();
            let next_states = Tensor::stack(&next_states, 0)?;

            let rewards = batch.iter().map(|e| e.3 as f32);
            let rewards = Tensor::from_iter(rewards, &DEVICE)?.unsqueeze(1)?;

            let non_final_mask = batch.iter().map(|e| !e.4 as u8 as f32);
            let non_final_mask = Tensor::from_iter(non_final_mask, &DEVICE)?.unsqueeze(1)?;

            // Get the estimated rewards for the actions that were taken at each step.
            let estimated_rewards = model.forward(&states)?;
            let x = estimated_rewards.gather(&actions, 1)?;

            // Get the maximum expected rewards for the next state, apply a discount rate
            // GAMMA to them and add them to the rewards that were actually gathered on
            // the current state. If the next state is a terminal state, just omit the
            // maximum estimated rewards for that state.
let expected_rewards = model.forward(&next_states)?.detach(); let y = expected_rewards.max_keepdim(1)?; let y = (y * GAMMA * non_final_mask + rewards)?; // Compare the estimated rewards with the maximum expected rewards and // perform the backward step. let loss = mse(&x, &y)?; optimizer.backward_step(&loss)?; } // If we are on a terminal state, reset the environment and log how it went. if step.terminated || step.truncated { episode += 1; println!("Episode {episode} | Rewards {}", accumulate_rewards as i64); state = env.reset(0)?; accumulate_rewards = 0.0; } } Ok(()) }
candle/candle-examples/examples/reinforcement-learning/dqn.rs/0
{ "file_path": "candle/candle-examples/examples/reinforcement-learning/dqn.rs", "repo_id": "candle", "token_count": 2032 }
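The learning step in `dqn.rs` above is the usual DQN temporal-difference update. Written out as equations, with the same notation as the code (`GAMMA` is the discount and the non-final mask zeroes the bootstrap term on terminal transitions):

```latex
% TD target for a sampled transition (s, a, r, s'):
y = r + \gamma \, m \, \max_{a'} Q_\theta(s', a'),
\qquad
m = \begin{cases} 1 & s' \text{ non-terminal} \\ 0 & s' \text{ terminal} \end{cases}

% Mean-squared-error loss over the sampled mini-batch B:
\mathcal{L}(\theta) = \frac{1}{|B|} \sum_{(s, a, r, s') \in B}
    \bigl( Q_\theta(s, a) - y \bigr)^2
```

Note that this example bootstraps from the same network with gradients detached (`model.forward(&next_states)?.detach()`) rather than from a separate target network.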
use candle::Device;
use candle::Module;
use candle_nn::VarBuilder;
use candle_transformers::models::segformer::{
    Config, ImageClassificationModel, SemanticSegmentationModel,
};
use clap::{Args, Parser, Subcommand};
use imageproc::image::Rgb;
use imageproc::integral_image::ArrayData;
use std::collections::HashMap;
use std::path::PathBuf;

#[derive(Parser)]
#[clap(about, version, long_about = None)]
struct CliArgs {
    #[arg(long, help = "use cpu")]
    cpu: bool,
    #[command(subcommand)]
    command: Commands,
}

#[derive(Args, Debug)]
struct SegmentationArgs {
    #[arg(
        long,
        help = "name of the huggingface hub model",
        default_value = "nvidia/segformer-b0-finetuned-ade-512-512"
    )]
    model_name: String,
    #[arg(
        long,
        help = "path to the label file in json format",
        default_value = "candle-examples/examples/segformer/assets/labels.json"
    )]
    label_path: PathBuf,
    #[arg(long, help = "path for the output mask image")]
    output_path: PathBuf,
    #[arg(help = "path to image as input")]
    image: PathBuf,
}

#[derive(Args, Debug)]
struct ClassificationArgs {
    #[arg(
        long,
        help = "name of the huggingface hub model",
        default_value = "paolinox/segformer-finetuned-food101"
    )]
    model_name: String,
    #[arg(help = "path to image as input")]
    image: PathBuf,
}

#[derive(Subcommand, Debug)]
enum Commands {
    Segment(SegmentationArgs),
    Classify(ClassificationArgs),
}

fn get_vb_and_config(model_name: String, device: &Device) -> anyhow::Result<(VarBuilder, Config)> {
    println!("loading model {} via huggingface hub", model_name);
    let api = hf_hub::api::sync::Api::new()?;
    let api = api.model(model_name.clone());
    let model_file = api.get("model.safetensors")?;
    println!("model {} downloaded and loaded", model_name);
    let vb = unsafe {
        VarBuilder::from_mmaped_safetensors(&[model_file], candle::DType::F32, device)?
    };
    let config = std::fs::read_to_string(api.get("config.json")?)?;
    let config: Config = serde_json::from_str(&config)?;
    println!("{:?}", config);
    Ok((vb, config))
}

#[derive(Debug, serde::Deserialize)]
struct LabelItem {
    index: u32,
    color: String,
}

fn segmentation_task(args: SegmentationArgs, device: &Device) -> anyhow::Result<()> {
    let label_file = std::fs::read_to_string(&args.label_path)?;
    let label_items: Vec<LabelItem> = serde_json::from_str(&label_file)?;
    let label_colors: HashMap<u32, Rgb<u8>> = label_items
        .iter()
        .map(|x| {
            (x.index - 1, {
                let color = x.color.trim_start_matches('#');
                let r = u8::from_str_radix(&color[0..2], 16).unwrap();
                let g = u8::from_str_radix(&color[2..4], 16).unwrap();
                let b = u8::from_str_radix(&color[4..6], 16).unwrap();
                Rgb([r, g, b])
            })
        })
        .collect();
    let image = candle_examples::imagenet::load_image224(args.image)?
        .unsqueeze(0)?
.to_device(device)?; let (vb, config) = get_vb_and_config(args.model_name, device)?; let num_labels = label_items.len(); let model = SemanticSegmentationModel::new(&config, num_labels, vb)?; let segmentations = model.forward(&image)?; // generate a mask image let mask = &segmentations.squeeze(0)?.argmax(0)?; let (h, w) = mask.dims2()?; let mask = mask.flatten_all()?.to_vec1::<u32>()?; let mask = mask .iter() .flat_map(|x| label_colors[x].data()) .collect::<Vec<u8>>(); let mask: image::ImageBuffer<image::Rgb<u8>, Vec<u8>> = image::ImageBuffer::from_raw(w as u32, h as u32, mask).unwrap(); // resize let mask = image::DynamicImage::from(mask); let mask = mask.resize_to_fill( w as u32 * 4, h as u32 * 4, image::imageops::FilterType::CatmullRom, ); mask.save(args.output_path.clone())?; println!("mask image saved to {:?}", args.output_path); Ok(()) } fn classification_task(args: ClassificationArgs, device: &Device) -> anyhow::Result<()> { let image = candle_examples::imagenet::load_image224(args.image)? .unsqueeze(0)? .to_device(device)?; let (vb, config) = get_vb_and_config(args.model_name, device)?; let num_labels = 7; let model = ImageClassificationModel::new(&config, num_labels, vb)?; let classification = model.forward(&image)?; let classification = candle_nn::ops::softmax_last_dim(&classification)?; let classification = classification.squeeze(0)?; println!( "classification logits {:?}", classification.to_vec1::<f32>()? ); let label_id = classification.argmax(0)?.to_scalar::<u32>()?; let label_id = format!("{}", label_id); println!("label: {}", config.id2label[&label_id]); Ok(()) } pub fn main() -> anyhow::Result<()> { let args = CliArgs::parse(); let device = candle_examples::device(args.cpu)?; if let Commands::Segment(args) = args.command { segmentation_task(args, &device)? } else if let Commands::Classify(args) = args.command { classification_task(args, &device)? } Ok(()) }
candle/candle-examples/examples/segformer/main.rs/0
{ "file_path": "candle/candle-examples/examples/segformer/main.rs", "repo_id": "candle", "token_count": 2229 }
use anyhow::{Ok, Result}; use candle::{DType, IndexOp, Tensor}; use candle_transformers::models::flux; use candle_transformers::models::mmdit::model::MMDiT; pub struct SkipLayerGuidanceConfig { pub scale: f64, pub start: f64, pub end: f64, pub layers: Vec<usize>, } #[allow(clippy::too_many_arguments)] pub fn euler_sample( mmdit: &MMDiT, y: &Tensor, context: &Tensor, num_inference_steps: usize, cfg_scale: f64, time_shift: f64, height: usize, width: usize, slg_config: Option<SkipLayerGuidanceConfig>, ) -> Result<Tensor> { let mut x = flux::sampling::get_noise(1, height, width, y.device())?.to_dtype(DType::F16)?; let sigmas = (0..=num_inference_steps) .map(|x| x as f64 / num_inference_steps as f64) .rev() .map(|x| time_snr_shift(time_shift, x)) .collect::<Vec<f64>>(); for (step, window) in sigmas.windows(2).enumerate() { let (s_curr, s_prev) = match window { [a, b] => (a, b), _ => continue, }; let timestep = (*s_curr) * 1000.0; let noise_pred = mmdit.forward( &Tensor::cat(&[&x, &x], 0)?, &Tensor::full(timestep as f32, (2,), x.device())?.contiguous()?, y, context, None, )?; let mut guidance = apply_cfg(cfg_scale, &noise_pred)?; if let Some(slg_config) = slg_config.as_ref() { if (num_inference_steps as f64) * slg_config.start < (step as f64) && (step as f64) < (num_inference_steps as f64) * slg_config.end { let slg_noise_pred = mmdit.forward( &x, &Tensor::full(timestep as f32, (1,), x.device())?.contiguous()?, &y.i(..1)?, &context.i(..1)?, Some(&slg_config.layers), )?; guidance = (guidance + (slg_config.scale * (noise_pred.i(..1)? - slg_noise_pred.i(..1))?)?)?; } } x = (x + (guidance * (*s_prev - *s_curr))?)?; } Ok(x) } // The "Resolution-dependent shifting of timestep schedules" recommended in the SD3 tech report paper // https://arxiv.org/pdf/2403.03206 // Following the implementation in ComfyUI: // https://github.com/comfyanonymous/ComfyUI/blob/3c60ecd7a83da43d694e26a77ca6b93106891251/ // comfy/model_sampling.py#L181 fn time_snr_shift(alpha: f64, t: f64) -> f64 { alpha * t / (1.0 + (alpha - 1.0) * t) } fn apply_cfg(cfg_scale: f64, noise_pred: &Tensor) -> Result<Tensor> { Ok(((cfg_scale * noise_pred.narrow(0, 0, 1)?)? - ((cfg_scale - 1.0) * noise_pred.narrow(0, 1, 1)?)?)?) }
candle/candle-examples/examples/stable-diffusion-3/sampling.rs/0
{ "file_path": "candle/candle-examples/examples/stable-diffusion-3/sampling.rs", "repo_id": "candle", "token_count": 1404 }
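For reference, the pieces of `euler_sample` above in equation form: the resolution-dependent time shift computed by `time_snr_shift`, the classifier-free-guidance combination computed by `apply_cfg` (where the two halves of the batched prediction are denoted ε₀ and ε₁), and one Euler step between consecutive sigmas:

```latex
% Resolution-dependent timestep shift (time_snr_shift):
\sigma(t) = \frac{\alpha t}{1 + (\alpha - 1)\,t}

% Classifier-free guidance as computed by apply_cfg, s = cfg_scale:
\epsilon = s\,\epsilon_0 - (s - 1)\,\epsilon_1
         = \epsilon_1 + s\,(\epsilon_0 - \epsilon_1)

% One Euler step between consecutive sigmas (sigma_i > sigma_{i+1}):
x \leftarrow x + (\sigma_{i+1} - \sigma_i)\,\epsilon
```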
# candle-trocr

`TrOCR` is a transformer-based OCR model. In this example, it is used to transcribe image text. See the associated [model card](https://huggingface.co/microsoft/trocr-base-printed) for details on the model itself.

Supported models include:

- `--which base`: small handwritten OCR model.
- `--which large`: large handwritten OCR model.
- `--which base-printed`: small printed OCR model.
- `--which large-printed`: large printed OCR model.

## Running an example

```bash
cargo run --example trocr --release -- --image candle-examples/examples/trocr/assets/trocr.png
cargo run --example trocr --release -- --which large --image candle-examples/examples/trocr/assets/trocr.png
cargo run --example trocr --release -- --which base-printed --image candle-examples/examples/trocr/assets/noto.png
cargo run --example trocr --release -- --which large-printed --image candle-examples/examples/trocr/assets/noto.png
```

### Outputs

Each line below is produced by the corresponding command above, in the same order:

```
industry , Mr. Brown commented icily . " Let us have a
industry , " Mr. Brown commented icily . " Let us have a
THE QUICK BROWN FOR JUMPS OVER THE LAY DOG
THE QUICK BROWN FOX JUMPS OVER THE LAZY DOG
```
candle/candle-examples/examples/trocr/readme.md/0
{ "file_path": "candle/candle-examples/examples/trocr/readme.md", "repo_id": "candle", "token_count": 360 }
#[cfg(feature = "accelerate")]
extern crate accelerate_src;

#[cfg(feature = "mkl")]
extern crate intel_mkl_src;

use candle_transformers::models::stable_diffusion;
use candle_transformers::models::wuerstchen;

use anyhow::{Error as E, Result};
use candle::{DType, Device, IndexOp, Tensor};
use clap::Parser;
use tokenizers::Tokenizer;

const PRIOR_GUIDANCE_SCALE: f64 = 4.0;
const RESOLUTION_MULTIPLE: f64 = 42.67;
const LATENT_DIM_SCALE: f64 = 10.67;
const PRIOR_CIN: usize = 16;
const DECODER_CIN: usize = 4;

#[derive(Parser)]
#[command(author, version, about, long_about = None)]
struct Args {
    /// The prompt to be used for image generation.
    #[arg(
        long,
        default_value = "A very realistic photo of a rusty robot walking on a sandy beach"
    )]
    prompt: String,

    #[arg(long, default_value = "")]
    uncond_prompt: String,

    /// Run on CPU rather than on GPU.
    #[arg(long)]
    cpu: bool,

    /// Enable tracing (generates a trace-timestamp.json file).
    #[arg(long)]
    tracing: bool,

    #[arg(long)]
    use_flash_attn: bool,

    /// The height in pixels of the generated image.
    #[arg(long)]
    height: Option<usize>,

    /// The width in pixels of the generated image.
    #[arg(long)]
    width: Option<usize>,

    /// The decoder weight file, in .safetensors format.
    #[arg(long, value_name = "FILE")]
    decoder_weights: Option<String>,

    /// The CLIP weight file, in .safetensors format.
    #[arg(long, value_name = "FILE")]
    clip_weights: Option<String>,

    /// The CLIP weight file used by the prior model, in .safetensors format.
    #[arg(long, value_name = "FILE")]
    prior_clip_weights: Option<String>,

    /// The prior weight file, in .safetensors format.
    #[arg(long, value_name = "FILE")]
    prior_weights: Option<String>,

    /// The VQGAN weight file, in .safetensors format.
    #[arg(long, value_name = "FILE")]
    vqgan_weights: Option<String>,

    #[arg(long, value_name = "FILE")]
    /// The file specifying the tokenizer to use for tokenization.
    tokenizer: Option<String>,

    #[arg(long, value_name = "FILE")]
    /// The file specifying the tokenizer to use for prior tokenization.
    prior_tokenizer: Option<String>,

    /// The number of samples to generate.
    #[arg(long, default_value_t = 1)]
    num_samples: i64,

    /// The name of the final image to generate.
#[arg(long, value_name = "FILE", default_value = "sd_final.png")] final_image: String, } #[derive(Debug, Clone, Copy, PartialEq, Eq)] enum ModelFile { Tokenizer, PriorTokenizer, Clip, PriorClip, Decoder, VqGan, Prior, } impl ModelFile { fn get(&self, filename: Option<String>) -> Result<std::path::PathBuf> { use hf_hub::api::sync::Api; match filename { Some(filename) => Ok(std::path::PathBuf::from(filename)), None => { let repo_main = "warp-ai/wuerstchen"; let repo_prior = "warp-ai/wuerstchen-prior"; let (repo, path) = match self { Self::Tokenizer => (repo_main, "tokenizer/tokenizer.json"), Self::PriorTokenizer => (repo_prior, "tokenizer/tokenizer.json"), Self::Clip => (repo_main, "text_encoder/model.safetensors"), Self::PriorClip => (repo_prior, "text_encoder/model.safetensors"), Self::Decoder => (repo_main, "decoder/diffusion_pytorch_model.safetensors"), Self::VqGan => (repo_main, "vqgan/diffusion_pytorch_model.safetensors"), Self::Prior => (repo_prior, "prior/diffusion_pytorch_model.safetensors"), }; let filename = Api::new()?.model(repo.to_string()).get(path)?; Ok(filename) } } } } fn output_filename( basename: &str, sample_idx: i64, num_samples: i64, timestep_idx: Option<usize>, ) -> String { let filename = if num_samples > 1 { match basename.rsplit_once('.') { None => format!("{basename}.{sample_idx}.png"), Some((filename_no_extension, extension)) => { format!("{filename_no_extension}.{sample_idx}.{extension}") } } } else { basename.to_string() }; match timestep_idx { None => filename, Some(timestep_idx) => match filename.rsplit_once('.') { None => format!("{filename}-{timestep_idx}.png"), Some((filename_no_extension, extension)) => { format!("{filename_no_extension}-{timestep_idx}.{extension}") } }, } } fn encode_prompt( prompt: &str, uncond_prompt: Option<&str>, tokenizer: std::path::PathBuf, clip_weights: std::path::PathBuf, clip_config: stable_diffusion::clip::Config, device: &Device, ) -> Result<Tensor> { let tokenizer = Tokenizer::from_file(tokenizer).map_err(E::msg)?; let pad_id = match &clip_config.pad_with { Some(padding) => *tokenizer.get_vocab(true).get(padding.as_str()).unwrap(), None => *tokenizer.get_vocab(true).get("<|endoftext|>").unwrap(), }; println!("Running with prompt \"{prompt}\"."); let mut tokens = tokenizer .encode(prompt, true) .map_err(E::msg)? .get_ids() .to_vec(); let tokens_len = tokens.len(); while tokens.len() < clip_config.max_position_embeddings { tokens.push(pad_id) } let tokens = Tensor::new(tokens.as_slice(), device)?.unsqueeze(0)?; println!("Building the clip transformer."); let text_model = stable_diffusion::build_clip_transformer(&clip_config, clip_weights, device, DType::F32)?; let text_embeddings = text_model.forward_with_mask(&tokens, tokens_len - 1)?; match uncond_prompt { None => Ok(text_embeddings), Some(uncond_prompt) => { let mut uncond_tokens = tokenizer .encode(uncond_prompt, true) .map_err(E::msg)? 
.get_ids() .to_vec(); let uncond_tokens_len = uncond_tokens.len(); while uncond_tokens.len() < clip_config.max_position_embeddings { uncond_tokens.push(pad_id) } let uncond_tokens = Tensor::new(uncond_tokens.as_slice(), device)?.unsqueeze(0)?; let uncond_embeddings = text_model.forward_with_mask(&uncond_tokens, uncond_tokens_len - 1)?; let text_embeddings = Tensor::cat(&[text_embeddings, uncond_embeddings], 0)?; Ok(text_embeddings) } } } fn run(args: Args) -> Result<()> { use tracing_chrome::ChromeLayerBuilder; use tracing_subscriber::prelude::*; let Args { prompt, uncond_prompt, cpu, height, width, tokenizer, final_image, num_samples, clip_weights, prior_weights, vqgan_weights, decoder_weights, tracing, .. } = args; let _guard = if tracing { let (chrome_layer, guard) = ChromeLayerBuilder::new().build(); tracing_subscriber::registry().with(chrome_layer).init(); Some(guard) } else { None }; let device = candle_examples::device(cpu)?; let height = height.unwrap_or(1024); let width = width.unwrap_or(1024); let prior_text_embeddings = { let tokenizer = ModelFile::PriorTokenizer.get(args.prior_tokenizer)?; let weights = ModelFile::PriorClip.get(args.prior_clip_weights)?; encode_prompt( &prompt, Some(&uncond_prompt), tokenizer.clone(), weights, stable_diffusion::clip::Config::wuerstchen_prior(), &device, )? }; println!("generated prior text embeddings {prior_text_embeddings:?}"); let text_embeddings = { let tokenizer = ModelFile::Tokenizer.get(tokenizer)?; let weights = ModelFile::Clip.get(clip_weights)?; encode_prompt( &prompt, None, tokenizer.clone(), weights, stable_diffusion::clip::Config::wuerstchen(), &device, )? }; println!("generated text embeddings {text_embeddings:?}"); println!("Building the prior."); let b_size = 1; let image_embeddings = { // https://huggingface.co/warp-ai/wuerstchen-prior/blob/main/prior/config.json let latent_height = (height as f64 / RESOLUTION_MULTIPLE).ceil() as usize; let latent_width = (width as f64 / RESOLUTION_MULTIPLE).ceil() as usize; let mut latents = Tensor::randn( 0f32, 1f32, (b_size, PRIOR_CIN, latent_height, latent_width), &device, )?; let prior = { let file = ModelFile::Prior.get(prior_weights)?; let vb = unsafe { candle_nn::VarBuilder::from_mmaped_safetensors(&[file], DType::F32, &device)? }; wuerstchen::prior::WPrior::new( /* c_in */ PRIOR_CIN, /* c */ 1536, /* c_cond */ 1280, /* c_r */ 64, /* depth */ 32, /* nhead */ 24, args.use_flash_attn, vb, )? }; let prior_scheduler = wuerstchen::ddpm::DDPMWScheduler::new(60, Default::default())?; let timesteps = prior_scheduler.timesteps(); let timesteps = &timesteps[..timesteps.len() - 1]; println!("prior denoising"); for (index, &t) in timesteps.iter().enumerate() { let start_time = std::time::Instant::now(); let latent_model_input = Tensor::cat(&[&latents, &latents], 0)?; let ratio = (Tensor::ones(2, DType::F32, &device)? * t)?; let noise_pred = prior.forward(&latent_model_input, &ratio, &prior_text_embeddings)?; let noise_pred = noise_pred.chunk(2, 0)?; let (noise_pred_text, noise_pred_uncond) = (&noise_pred[0], &noise_pred[1]); let noise_pred = (noise_pred_uncond + ((noise_pred_text - noise_pred_uncond)? * PRIOR_GUIDANCE_SCALE)?)?; latents = prior_scheduler.step(&noise_pred, t, &latents)?; let dt = start_time.elapsed().as_secs_f32(); println!("step {}/{} done, {:.2}s", index + 1, timesteps.len(), dt); } ((latents * 42.)? - 1.)? 
}; println!("Building the vqgan."); let vqgan = { let file = ModelFile::VqGan.get(vqgan_weights)?; let vb = unsafe { candle_nn::VarBuilder::from_mmaped_safetensors(&[file], DType::F32, &device)? }; wuerstchen::paella_vq::PaellaVQ::new(vb)? }; println!("Building the decoder."); // https://huggingface.co/warp-ai/wuerstchen/blob/main/decoder/config.json let decoder = { let file = ModelFile::Decoder.get(decoder_weights)?; let vb = unsafe { candle_nn::VarBuilder::from_mmaped_safetensors(&[file], DType::F32, &device)? }; wuerstchen::diffnext::WDiffNeXt::new( /* c_in */ DECODER_CIN, /* c_out */ DECODER_CIN, /* c_r */ 64, /* c_cond */ 1024, /* clip_embd */ 1024, /* patch_size */ 2, args.use_flash_attn, vb, )? }; for idx in 0..num_samples { // https://huggingface.co/warp-ai/wuerstchen/blob/main/model_index.json let latent_height = (image_embeddings.dim(2)? as f64 * LATENT_DIM_SCALE) as usize; let latent_width = (image_embeddings.dim(3)? as f64 * LATENT_DIM_SCALE) as usize; let mut latents = Tensor::randn( 0f32, 1f32, (b_size, DECODER_CIN, latent_height, latent_width), &device, )?; println!("diffusion process with prior {image_embeddings:?}"); let scheduler = wuerstchen::ddpm::DDPMWScheduler::new(12, Default::default())?; let timesteps = scheduler.timesteps(); let timesteps = &timesteps[..timesteps.len() - 1]; for (index, &t) in timesteps.iter().enumerate() { let start_time = std::time::Instant::now(); let ratio = (Tensor::ones(1, DType::F32, &device)? * t)?; let noise_pred = decoder.forward(&latents, &ratio, &image_embeddings, Some(&text_embeddings))?; latents = scheduler.step(&noise_pred, t, &latents)?; let dt = start_time.elapsed().as_secs_f32(); println!("step {}/{} done, {:.2}s", index + 1, timesteps.len(), dt); } println!( "Generating the final image for sample {}/{}.", idx + 1, num_samples ); let image = vqgan.decode(&(&latents * 0.3764)?)?; let image = (image.clamp(0f32, 1f32)? * 255.)? .to_dtype(DType::U8)? .i(0)?; let image_filename = output_filename(&final_image, idx + 1, num_samples, None); candle_examples::save_image(&image, image_filename)? } Ok(()) } fn main() -> Result<()> { let args = Args::parse(); run(args) }
candle/candle-examples/examples/wuerstchen/main.rs/0
{ "file_path": "candle/candle-examples/examples/wuerstchen/main.rs", "repo_id": "candle", "token_count": 6372 }
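The prior denoising loop above applies classifier-free guidance and derives its latent resolution from the requested image size. Written out, matching the `noise_pred_uncond + (noise_pred_text - noise_pred_uncond) * PRIOR_GUIDANCE_SCALE` line and the `RESOLUTION_MULTIPLE` constant:

```latex
% Classifier-free guidance in the prior loop, s = PRIOR_GUIDANCE_SCALE = 4.0:
\epsilon = \epsilon_{\text{uncond}} + s\,(\epsilon_{\text{text}} - \epsilon_{\text{uncond}})

% Prior latent resolution for a requested H x W image (RESOLUTION_MULTIPLE = 42.67):
h_{\text{prior}} = \left\lceil H / 42.67 \right\rceil,
\qquad
w_{\text{prior}} = \left\lceil W / 42.67 \right\rceil
```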
use candle::{Result, Tensor}; // https://github.com/facebookresearch/audiocraft/blob/69fea8b290ad1b4b40d28f92d1dfc0ab01dbab85/audiocraft/data/audio_utils.py#L57 pub fn normalize_loudness( wav: &Tensor, sample_rate: u32, loudness_compressor: bool, ) -> Result<Tensor> { let energy = wav.sqr()?.mean_all()?.sqrt()?.to_vec0::<f32>()?; if energy < 2e-3 { return Ok(wav.clone()); } let wav_array = wav.to_vec1::<f32>()?; let mut meter = crate::bs1770::ChannelLoudnessMeter::new(sample_rate); meter.push(wav_array.into_iter()); let power = meter.as_100ms_windows(); let loudness = match crate::bs1770::gated_mean(power) { None => return Ok(wav.clone()), Some(gp) => gp.loudness_lkfs() as f64, }; let delta_loudness = -14. - loudness; let gain = 10f64.powf(delta_loudness / 20.); let wav = (wav * gain)?; if loudness_compressor { wav.tanh() } else { Ok(wav) } }
candle/candle-examples/src/audio.rs/0
{ "file_path": "candle/candle-examples/src/audio.rs", "repo_id": "candle", "token_count": 458 }
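The gain applied in `normalize_loudness` follows directly from the measured BS.1770 gated loudness, with a target of -14 LUFS as in the code:

```latex
% Loudness delta to reach the -14 LUFS target, given measured loudness L:
\Delta L = -14 - L

% dB delta converted to a linear gain applied to the waveform:
g = 10^{\Delta L / 20},
\qquad
\text{wav}_{\text{out}} = g \cdot \text{wav}_{\text{in}}
```

The function returns the input unchanged when the RMS energy is below 2e-3 or when no gated loudness can be computed, and optionally applies `tanh` as a soft limiter when `loudness_compressor` is set.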
/****************************************************************************** * Copyright (c) 2024, Tri Dao. ******************************************************************************/ #pragma once // #include "philox_unpack.cuh" // For at::cuda::philox::unpack #include <cute/tensor.hpp> #include <cutlass/cutlass.h> #include <cutlass/array.h> #include <cutlass/numeric_types.h> #include "block_info.h" #include "kernel_traits.h" #include "utils.h" #include "softmax.h" #include "mask.h" #include "dropout.h" #include "rotary.h" namespace flash { using namespace cute; //////////////////////////////////////////////////////////////////////////////////////////////////// template<typename ElementAccum, typename Params, int kBlockM, bool Is_even_MN> __forceinline__ __device__ auto get_lse_tile(const Params &params, const int bidb, const int bidh, const int m_block, const BlockInfo</*Varlen=*/!Is_even_MN> &binfo) { // When params.unpadded_lse is false, LSE is written as (b, h, seqlen_q) - this is non-variable seqlen path. // Otherwise, when params.seqlenq_ngroups_swapped is true, it is written as (h, seqlen_q, b) to account for seqlen_q <-> h swapping trick. // Otherwise, it's written as (h, b, seqlen_q). const bool varlen_q = params.unpadded_lse && !params.seqlenq_ngroups_swapped; auto lse_offset = varlen_q ? binfo.q_offset(params.seqlen_q, 1, bidb) : 0; auto gmem_ptr_lse = make_gmem_ptr(reinterpret_cast<ElementAccum*>(params.softmax_lse_ptr) + lse_offset); auto lse_shape = varlen_q ? make_shape(1, params.h, params.total_q) : make_shape(params.b, params.h, params.seqlen_q); auto lse_stride = params.seqlenq_ngroups_swapped ? make_stride(1, params.seqlen_q * params.b, params.b) : ( params.unpadded_lse ? make_stride(params.h * params.total_q, params.total_q, 1) : make_stride(params.h * params.seqlen_q, params.seqlen_q, 1) ); auto lse_layout = make_layout(lse_shape, lse_stride); Tensor mLSE = make_tensor(gmem_ptr_lse, lse_layout); auto mLSE_slice = varlen_q ? mLSE(0, bidh, _) : mLSE(bidb, bidh, _); return local_tile(mLSE_slice, Shape<Int<kBlockM>>{}, make_coord(m_block)); } template<typename Kernel_traits, bool Is_dropout, bool Is_causal, bool Is_local, bool Has_alibi, bool Is_even_MN, bool Is_even_K, bool Is_softcap, bool Return_softmax, typename Params> inline __device__ void compute_attn_1rowblock(const Params &params, const int bidb, const int bidh, const int m_block) { using Element = typename Kernel_traits::Element; using ElementAccum = typename Kernel_traits::ElementAccum; using index_t = typename Kernel_traits::index_t; // Shared memory. extern __shared__ char smem_[]; // The thread index. const int tidx = threadIdx.x; constexpr int kBlockM = Kernel_traits::kBlockM; constexpr int kBlockN = Kernel_traits::kBlockN; constexpr int kHeadDim = Kernel_traits::kHeadDim; constexpr int kNWarps = Kernel_traits::kNWarps; auto seed_offset = std::make_tuple(0ull, 0ull); // auto seed_offset = at::cuda::philox::unpack(params.philox_args); flash::Dropout dropout(std::get<0>(seed_offset), std::get<1>(seed_offset), params.p_dropout_in_uint8_t, bidb, bidh, tidx, params.h); // Save seed and offset for backward, before any early exiting. Otherwise the 0-th thread block might // exit early and no one saves the rng states. 
if (Is_dropout && blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0 && tidx == 0) { params.rng_state[0] = std::get<0>(seed_offset); params.rng_state[1] = std::get<1>(seed_offset); } const BlockInfo</*Varlen=*/!Is_even_MN> binfo(params, bidb); if (m_block * kBlockM >= binfo.actual_seqlen_q) return; const int n_block_min = !Is_local ? 0 : std::max(0, (m_block * kBlockM + binfo.actual_seqlen_k - binfo.actual_seqlen_q - params.window_size_left) / kBlockN); int n_block_max = cute::ceil_div(binfo.actual_seqlen_k, kBlockN); if (Is_causal || Is_local) { n_block_max = std::min(n_block_max, cute::ceil_div((m_block + 1) * kBlockM + binfo.actual_seqlen_k - binfo.actual_seqlen_q + params.window_size_right, kBlockN)); // if (threadIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0) { // printf("m_block = %d, n_block_max = %d\n", m_block, n_block_max); // } } // We exit early and write 0 to gO and gLSE. This also covers the case where actual_seqlen_k == 0. // Otherwise we might read OOB elements from gK and gV. if ((Is_causal || Is_local || !Is_even_MN) && n_block_max <= n_block_min) { Tensor mO = make_tensor(make_gmem_ptr(reinterpret_cast<Element*>(params.o_ptr) + binfo.q_offset(params.o_batch_stride, params.o_row_stride, bidb)), make_shape(binfo.actual_seqlen_q, params.h, params.d), make_stride(params.o_row_stride, params.o_head_stride, _1{})); Tensor gO = local_tile(mO(_, bidh, _), Shape<Int<kBlockM>, Int<kHeadDim>>{}, make_coord(m_block, 0)); // (kBlockM, kHeadDim) Tensor gLSE = get_lse_tile<ElementAccum, Params, kBlockM, Is_even_MN>(params, bidb, bidh, m_block, binfo); typename Kernel_traits::GmemTiledCopyO gmem_tiled_copy_O; auto gmem_thr_copy_O = gmem_tiled_copy_O.get_thread_slice(tidx); Tensor tOgO = gmem_thr_copy_O.partition_D(gO); Tensor tOrO = make_tensor<Element>(shape(tOgO)); clear(tOrO); // Construct identity layout for sO Tensor cO = make_identity_tensor(make_shape(size<0>(gO), size<1>(gO))); // (BLK_M,BLK_K) -> (blk_m,blk_k) // Repeat the partitioning with identity layouts Tensor tOcO = gmem_thr_copy_O.partition_D(cO); Tensor tOpO = make_tensor<bool>(make_shape(size<2>(tOgO))); if (!Is_even_K) { #pragma unroll for (int k = 0; k < size(tOpO); ++k) { tOpO(k) = get<1>(tOcO(0, 0, k)) < params.d; } } // Clear_OOB_K must be false since we don't want to write zeros to gmem flash::copy<Is_even_MN, Is_even_K, /*Clear_OOB_MN=*/false, /*Clear_OOB_K=*/false>( gmem_tiled_copy_O, tOrO, tOgO, tOcO, tOpO, binfo.actual_seqlen_q - m_block * kBlockM ); #pragma unroll for (int m = 0; m < size<1>(tOgO); ++m) { const int row = get<0>(tOcO(0, m, 0)); if (row < binfo.actual_seqlen_q - m_block * kBlockM && get<1>(tOcO(0, m, 0)) == 0) { gLSE(row) = INFINITY; } } return; } // if (tidx == 0) { printf("m_block = %d, n_block_min = %d, n_block_max = %d\n", m_block, n_block_min, n_block_max); } // We iterate over the blocks in reverse order. This is because the last block is the only one // that needs masking when we read K and V from global memory. Moreover, iterating in reverse // might save us 1 register (we just need n_block instead of both n_block and n_block_max). 
const index_t row_offset_p = ((bidb * params.h + bidh) * params.seqlen_q_rounded + m_block * kBlockM) * params.seqlen_k_rounded + (n_block_max - 1) * kBlockN; Tensor mQ = make_tensor(make_gmem_ptr(reinterpret_cast<Element*>(params.q_ptr) + binfo.q_offset(params.q_batch_stride, params.q_row_stride, bidb)), make_shape(binfo.actual_seqlen_q, params.h, params.d), make_stride(params.q_row_stride, params.q_head_stride, _1{})); Tensor gQ = local_tile(mQ(_, bidh, _), Shape<Int<kBlockM>, Int<kHeadDim>>{}, make_coord(m_block, 0)); // (kBlockM, kHeadDim) Tensor mK = make_tensor(make_gmem_ptr(reinterpret_cast<Element*>(params.k_ptr) + binfo.k_offset(params.k_batch_stride, params.k_row_stride, bidb)), make_shape(binfo.actual_seqlen_k, params.h_k, params.d), make_stride(params.k_row_stride, params.k_head_stride, _1{})); Tensor gK = local_tile(mK(_, bidh / params.h_h_k_ratio, _), Shape<Int<kBlockN>, Int<kHeadDim>>{}, make_coord(_, 0)); // (kBlockN, kHeadDim, nblocksN) Tensor mV = make_tensor(make_gmem_ptr(reinterpret_cast<Element*>(params.v_ptr) + binfo.k_offset(params.v_batch_stride, params.v_row_stride, bidb)), make_shape(binfo.actual_seqlen_k, params.h_k, params.d), make_stride(params.v_row_stride, params.v_head_stride, _1{})); Tensor gV = local_tile(mV(_, bidh / params.h_h_k_ratio, _), Shape<Int<kBlockN>, Int<kHeadDim>>{}, make_coord(_, 0)); // (kBlockN, kHeadDim, nblocksN) Tensor gP = make_tensor(make_gmem_ptr(reinterpret_cast<Element *>(params.p_ptr) + row_offset_p), Shape<Int<kBlockM>, Int<kBlockN>>{}, make_stride(params.seqlen_k_rounded, _1{})); Tensor sQ = make_tensor(make_smem_ptr(reinterpret_cast<Element *>(smem_)), typename Kernel_traits::SmemLayoutQ{}); // Careful we're using the same smem for sQ and sK | sV if Share_Q_K_smem; Tensor sK = make_tensor(sQ.data() + (Kernel_traits::Share_Q_K_smem ? 
0 : size(sQ)), typename Kernel_traits::SmemLayoutKV{}); Tensor sV = make_tensor(sK.data() + size(sK), typename Kernel_traits::SmemLayoutKV{}); Tensor sVt = make_tensor(sV.data(), typename Kernel_traits::SmemLayoutVtransposed{}); Tensor sVtNoSwizzle = make_tensor(sV.data().get(), typename Kernel_traits::SmemLayoutVtransposedNoSwizzle{}); typename Kernel_traits::GmemTiledCopyQKV gmem_tiled_copy_QKV; auto gmem_thr_copy_QKV = gmem_tiled_copy_QKV.get_thread_slice(tidx); Tensor tQgQ = gmem_thr_copy_QKV.partition_S(gQ); Tensor tQsQ = gmem_thr_copy_QKV.partition_D(sQ); Tensor tKgK = gmem_thr_copy_QKV.partition_S(gK); // (KCPY, KCPY_N, KCPY_K, nblocksN) Tensor tKsK = gmem_thr_copy_QKV.partition_D(sK); Tensor tVgV = gmem_thr_copy_QKV.partition_S(gV); // (VCPY, VCPY_N, VCPY_K, nblocksN) Tensor tVsV = gmem_thr_copy_QKV.partition_D(sV); typename Kernel_traits::TiledMma tiled_mma; auto thr_mma = tiled_mma.get_thread_slice(tidx); Tensor tSrQ = thr_mma.partition_fragment_A(sQ); // (MMA,MMA_M,MMA_K) Tensor tSrK = thr_mma.partition_fragment_B(sK); // (MMA,MMA_N,MMA_K) Tensor tOrVt = thr_mma.partition_fragment_B(sVtNoSwizzle); // (MMA, MMA_K,MMA_N) Tensor tSgS = thr_mma.partition_C(gP); Tensor acc_o = partition_fragment_C(tiled_mma, Shape<Int<kBlockM>, Int<kHeadDim>>{}); // MMA, MMA_M, MMA_K // // Copy Atom retiling // auto smem_tiled_copy_Q = make_tiled_copy_A(typename Kernel_traits::SmemCopyAtom{}, tiled_mma); auto smem_thr_copy_Q = smem_tiled_copy_Q.get_thread_slice(tidx); // if (cute::thread0()) {smem_thr_copy_Q.print_all();} Tensor tSsQ = smem_thr_copy_Q.partition_S(sQ); // if (cute::thread0()) {print(tSsQ.layout()); printf("\n");} auto smem_tiled_copy_K = make_tiled_copy_B(typename Kernel_traits::SmemCopyAtom{}, tiled_mma); auto smem_thr_copy_K = smem_tiled_copy_K.get_thread_slice(tidx); Tensor tSsK = smem_thr_copy_K.partition_S(sK); auto smem_tiled_copy_V = make_tiled_copy_B(typename Kernel_traits::SmemCopyAtomTransposed{}, tiled_mma); auto smem_thr_copy_V = smem_tiled_copy_V.get_thread_slice(tidx); Tensor tOsVt = smem_thr_copy_V.partition_S(sVt); // // PREDICATES // // // Allocate predicate tensors for m and n // Tensor tQpQ = make_tensor<bool>(make_shape(size<1>(tQsQ), size<2>(tQsQ)), Stride<_1,_0>{}); // Tensor tKVpKV = make_tensor<bool>(make_shape(size<1>(tKsK), size<2>(tKsK)), Stride<_1,_0>{}); // Construct identity layout for sQ and sK Tensor cQ = make_identity_tensor(make_shape(size<0>(sQ), size<1>(sQ))); // (BLK_M,BLK_K) -> (blk_m,blk_k) Tensor cKV = make_identity_tensor(make_shape(size<0>(sK), size<1>(sK))); // (BLK_N,BLK_K) -> (blk_n,blk_k) // Tensor tScQ = thr_mma.partition_A(cQ); // (MMA,MMA_M,MMA_K) // if (cute::thread0()) { // print(tScQ.layout()); printf("\n"); // for (int i = 0; i < size(tScQ); ++i) { // printf("%d ", get<0>(tScQ(i))); // } // printf("\n"); // for (int i = 0; i < size(tScQ); ++i) { // printf("%d ", get<1>(tScQ(i))); // } // printf("\n"); // } // Repeat the partitioning with identity layouts Tensor tQcQ = gmem_thr_copy_QKV.partition_S(cQ); // (ACPY,ACPY_M,ACPY_K) -> (blk_m,blk_k) Tensor tKVcKV = gmem_thr_copy_QKV.partition_S(cKV); // (BCPY,BCPY_N,BCPY_K) -> (blk_n,blk_k) // Allocate predicate tensors for k Tensor tQpQ = make_tensor<bool>(make_shape(size<2>(tQsQ))); Tensor tKVpKV = make_tensor<bool>(make_shape(size<2>(tKsK))); // Set predicates for k bounds if (!Is_even_K) { #pragma unroll for (int k = 0; k < size(tQpQ); ++k) { tQpQ(k) = get<1>(tQcQ(0, 0, k)) < params.d; } #pragma unroll for (int k = 0; k < size(tKVpKV); ++k) { tKVpKV(k) = get<1>(tKVcKV(0, 0, k)) < 
params.d; } } // Prologue // We don't need to clear the sQ smem tiles since we'll only write out the valid outputs flash::copy<Is_even_MN, Is_even_K>(gmem_tiled_copy_QKV, tQgQ, tQsQ, tQcQ, tQpQ, binfo.actual_seqlen_q - m_block * kBlockM); if (Kernel_traits::Is_Q_in_regs) { cute::cp_async_fence(); } // // if (cute::thread(1, 0)) { print(tQsQ); } // // Tensor sQNoSwizzle = make_tensor(make_smem_ptr(reinterpret_cast<Element *>(smem_)), typename Kernel_traits::SmemLayoutQNoSwizzle{}); // // if (cute::thread0()) { print(sQNoSwizzle); } if (Kernel_traits::Share_Q_K_smem) { flash::cp_async_wait<0>(); __syncthreads(); Tensor tSrQ_copy_view = smem_thr_copy_Q.retile_D(tSrQ); CUTE_STATIC_ASSERT_V(size<1>(tSsQ) == size<1>(tSrQ_copy_view)); // M cute::copy(smem_tiled_copy_Q, tSsQ, tSrQ_copy_view); __syncthreads(); } int n_block = n_block_max - 1; // We don't need to clear the sK smem tiles since we'll mask out the scores anyway. flash::copy<Is_even_MN, Is_even_K>(gmem_tiled_copy_QKV, tKgK(_, _, _, n_block), tKsK, tKVcKV, tKVpKV, binfo.actual_seqlen_k - n_block * kBlockN); cute::cp_async_fence(); // if (threadIdx.x == 0 && blockIdx.y == 0 && blockIdx.z < 2) { print(tKgK); } // __syncthreads(); if (Kernel_traits::Is_Q_in_regs && !Kernel_traits::Share_Q_K_smem) { flash::cp_async_wait<1>(); __syncthreads(); Tensor tSrQ_copy_view = smem_thr_copy_Q.retile_D(tSrQ); CUTE_STATIC_ASSERT_V(size<1>(tSsQ) == size<1>(tSrQ_copy_view)); // M cute::copy(smem_tiled_copy_Q, tSsQ, tSrQ_copy_view); } clear(acc_o); flash::Softmax<2 * size<1>(acc_o)> softmax; const float alibi_slope = !Has_alibi || params.alibi_slopes_ptr == nullptr ? 0.0f : reinterpret_cast<float *>(params.alibi_slopes_ptr)[bidb * params.alibi_slopes_batch_stride + bidh] / params.scale_softmax; flash::Mask<Is_causal, Is_local, Has_alibi> mask(binfo.actual_seqlen_k, binfo.actual_seqlen_q, params.window_size_left, params.window_size_right, alibi_slope); // For performance reason, we separate out two kinds of iterations: // those that need masking on S, and those that don't. // We need masking on S for the very last block when K and V has length not multiple of kBlockN. // We also need masking on S if it's causal, for the last ceil_div(kBlockM, kBlockN) blocks. // We will have at least 1 "masking" iteration. // If not even_N, then seqlen_k might end in the middle of a block. In that case we need to // mask 2 blocks (e.g. when kBlockM == kBlockN), not just 1. constexpr int n_masking_steps = (!Is_causal && !Is_local) ? 1 : ((Is_even_MN && Is_causal) ? 
cute::ceil_div(kBlockM, kBlockN) : cute::ceil_div(kBlockM, kBlockN) + 1); #pragma unroll for (int masking_step = 0; masking_step < n_masking_steps; ++masking_step, --n_block) { Tensor acc_s = partition_fragment_C(tiled_mma, Shape<Int<kBlockM>, Int<kBlockN>>{}); // (MMA=4, MMA_M, MMA_N) clear(acc_s); flash::cp_async_wait<0>(); __syncthreads(); // Advance gV if (masking_step > 0) { flash::copy</*Is_even_MN=*/true, Is_even_K>(gmem_tiled_copy_QKV, tVgV(_, _, _, n_block), tVsV, tKVcKV, tKVpKV); } else { // Clear the smem tiles to account for predicated off loads flash::copy<Is_even_MN, Is_even_K, /*Clear_OOB_MN=*/true>( gmem_tiled_copy_QKV, tVgV(_, _, _, n_block), tVsV, tKVcKV, tKVpKV, binfo.actual_seqlen_k - n_block * kBlockN ); } cute::cp_async_fence(); flash::gemm</*A_in_regs=*/Kernel_traits::Is_Q_in_regs>( acc_s, tSrQ, tSrK, tSsQ, tSsK, tiled_mma, smem_tiled_copy_Q, smem_tiled_copy_K, smem_thr_copy_Q, smem_thr_copy_K ); // if (cute::thread0()) { print(acc_s); } if constexpr (Is_softcap){ flash::apply_softcap(acc_s, params.softcap); } mask.template apply_mask<Is_causal, Is_even_MN>( acc_s, n_block * kBlockN, m_block * kBlockM + (tidx / 32) * 16 + (tidx % 32) / 4, kNWarps * 16 ); flash::cp_async_wait<0>(); __syncthreads(); if (n_block > n_block_min) { flash::copy</*Is_even_MN=*/true, Is_even_K>(gmem_tiled_copy_QKV, tKgK(_, _, _, n_block - 1), tKsK, tKVcKV, tKVpKV); // This cp_async_fence needs to be in the if block, otherwise the synchronization // isn't right and we get race conditions. cute::cp_async_fence(); } // TODO: when we have key_padding_mask we'll need to Check_inf masking_step == 0 ? softmax.template softmax_rescale_o</*Is_first=*/true, /*Check_inf=*/Is_causal || Is_local>(acc_s, acc_o, params.scale_softmax_log2) : softmax.template softmax_rescale_o</*Is_first=*/false, /*Check_inf=*/Is_causal || Is_local>(acc_s, acc_o, params.scale_softmax_log2); // Convert acc_s from fp32 to fp16/bf16 Tensor rP = flash::convert_type<Element>(acc_s); int block_row_idx = m_block * (kBlockM / 16) + tidx / 32; int block_col_idx = n_block * (kBlockN / 32); if (Return_softmax) { Tensor rP_drop = make_fragment_like(rP); cute::copy(rP, rP_drop); dropout.template apply_dropout</*encode_dropout_in_sign_bit=*/true>( rP_drop, block_row_idx, block_col_idx, kNWarps ); cute::copy(rP_drop, tSgS); tSgS.data() = tSgS.data() + (-kBlockN); } if (Is_dropout) { dropout.apply_dropout(rP, block_row_idx, block_col_idx, kNWarps); } // Reshape rP from (MMA=4, MMA_M, MMA_N) to ((4, 2), MMA_M, MMA_N / 2) // if using m16n8k16 or (4, MMA_M, MMA_N) if using m16n8k8. 
Tensor tOrP = make_tensor(rP.data(), flash::convert_layout_acc_Aregs<Kernel_traits::TiledMma>(rP.layout())); // if (cute::thread0()) { print(tOrP); } flash::gemm_rs(acc_o, tOrP, tOrVt, tOsVt, tiled_mma, smem_tiled_copy_V, smem_thr_copy_V); // if (cute::thread0()) { print(scores); } // This check is at the end of the loop since we always have at least 1 iteration if (n_masking_steps > 1 && n_block <= n_block_min) { --n_block; break; } } // These are the iterations where we don't need masking on S for (; n_block >= n_block_min; --n_block) { Tensor acc_s = partition_fragment_C(tiled_mma, Shape<Int<kBlockM>, Int<kBlockN>>{}); // (MMA=4, MMA_M, MMA_N) clear(acc_s); flash::cp_async_wait<0>(); __syncthreads(); flash::copy</*Is_even_MN=*/true, Is_even_K>(gmem_tiled_copy_QKV, tVgV(_, _, _, n_block), tVsV, tKVcKV, tKVpKV); cute::cp_async_fence(); flash::gemm</*A_in_regs=*/Kernel_traits::Is_Q_in_regs>( acc_s, tSrQ, tSrK, tSsQ, tSsK, tiled_mma, smem_tiled_copy_Q, smem_tiled_copy_K, smem_thr_copy_Q, smem_thr_copy_K ); if constexpr (Is_softcap){ flash::apply_softcap(acc_s, params.softcap); } flash::cp_async_wait<0>(); __syncthreads(); if (n_block > n_block_min) { flash::copy</*Is_even_MN=*/true, Is_even_K>(gmem_tiled_copy_QKV, tKgK(_, _, _, n_block - 1), tKsK, tKVcKV, tKVpKV); // This cp_async_fence needs to be in the if block, otherwise the synchronization // isn't right and we get race conditions. cute::cp_async_fence(); } mask.template apply_mask</*Causal_mask=*/false>( acc_s, n_block * kBlockN, m_block * kBlockM + (tidx / 32) * 16 + (tidx % 32) / 4, kNWarps * 16 ); softmax.template softmax_rescale_o</*Is_first=*/false, /*Check_inf=*/Is_local>(acc_s, acc_o, params.scale_softmax_log2); Tensor rP = flash::convert_type<Element>(acc_s); int block_row_idx = m_block * (kBlockM / 16) + tidx / 32; int block_col_idx = n_block * (kBlockN / 32); if (Return_softmax) { Tensor rP_drop = make_fragment_like(rP); cute::copy(rP, rP_drop); dropout.template apply_dropout</*encode_dropout_in_sign_bit=*/true>( rP_drop, block_row_idx, block_col_idx, kNWarps ); cute::copy(rP_drop, tSgS); tSgS.data() = tSgS.data() + (-kBlockN); } if (Is_dropout) { dropout.apply_dropout(rP, block_row_idx, block_col_idx, kNWarps); } // Reshape rP from (MMA=4, MMA_M, MMA_N) to ((4, 2), MMA_M, MMA_N / 2) // if using m16n8k16 or (4, MMA_M, MMA_N) if using m16n8k8. Tensor tOrP = make_tensor(rP.data(), flash::convert_layout_acc_Aregs<Kernel_traits::TiledMma>(rP.layout())); flash::gemm_rs(acc_o, tOrP, tOrVt, tOsVt, tiled_mma, smem_tiled_copy_V, smem_thr_copy_V); } // Epilogue Tensor lse = softmax.template normalize_softmax_lse<Is_dropout>(acc_o, params.scale_softmax, params.rp_dropout); // Convert acc_o from fp32 to fp16/bf16 Tensor rO = flash::convert_type<Element>(acc_o); Tensor sO = make_tensor(sQ.data(), typename Kernel_traits::SmemLayoutO{}); // (SMEM_M,SMEM_N) // Partition sO to match the accumulator partitioning auto smem_tiled_copy_O = make_tiled_copy_C(typename Kernel_traits::SmemCopyAtomO{}, tiled_mma); auto smem_thr_copy_O = smem_tiled_copy_O.get_thread_slice(tidx); Tensor taccOrO = smem_thr_copy_O.retile_S(rO); // ((Atom,AtomNum), MMA_M, MMA_N) Tensor taccOsO = smem_thr_copy_O.partition_D(sO); // ((Atom,AtomNum),PIPE_M,PIPE_N) // sO has the same size as sQ, so we don't need to sync here. 
if (Kernel_traits::Share_Q_K_smem) { __syncthreads(); } cute::copy(smem_tiled_copy_O, taccOrO, taccOsO); Tensor mO = make_tensor(make_gmem_ptr(reinterpret_cast<Element*>(params.o_ptr) + binfo.q_offset(params.o_batch_stride, params.o_row_stride, bidb)), make_shape(binfo.actual_seqlen_q, params.h, params.d), make_stride(params.o_row_stride, params.o_head_stride, _1{})); Tensor gO = local_tile(mO(_, bidh, _), Shape<Int<kBlockM>, Int<kHeadDim>>{}, make_coord(m_block, 0)); // (kBlockM, kHeadDim) Tensor gLSE = get_lse_tile<ElementAccum, Params, kBlockM, Is_even_MN>(params, bidb, bidh, m_block, binfo); typename Kernel_traits::GmemTiledCopyO gmem_tiled_copy_O; auto gmem_thr_copy_O = gmem_tiled_copy_O.get_thread_slice(tidx); Tensor tOsO = gmem_thr_copy_O.partition_S(sO); // ((Atom,AtomNum),ATOM_M,ATOM_N) Tensor tOgO = gmem_thr_copy_O.partition_D(gO); __syncthreads(); Tensor tOrO = make_tensor<Element>(shape(tOgO)); cute::copy(gmem_tiled_copy_O, tOsO, tOrO); Tensor caccO = make_identity_tensor(Shape<Int<kBlockM>, Int<kHeadDim>>{}); // (BLK_M,BLK_K) -> (blk_m,blk_k) Tensor taccOcO = thr_mma.partition_C(caccO); // (MMA,MMA_M,MMA_K) static_assert(decltype(size<0>(taccOcO))::value == 4); // Convert to ((2, 2), MMA_M, MMA_K) then take only the row indices. Tensor taccOcO_row = logical_divide(taccOcO, Shape<_2>{})(make_coord(0, _), _, 0); CUTE_STATIC_ASSERT_V(size(lse) == size(taccOcO_row)); // MMA_M if (get<1>(taccOcO_row(0)) == 0) { #pragma unroll for (int mi = 0; mi < size(lse); ++mi) { const int row = get<0>(taccOcO_row(mi)); if (row < binfo.actual_seqlen_q - m_block * kBlockM) { gLSE(row) = lse(mi); } } } // Construct identity layout for sO Tensor cO = make_identity_tensor(make_shape(size<0>(sO), size<1>(sO))); // (BLK_M,BLK_K) -> (blk_m,blk_k) // Repeat the partitioning with identity layouts Tensor tOcO = gmem_thr_copy_O.partition_D(cO); // (ACPY,ACPY_M,ACPY_K) -> (blk_m,blk_k) Tensor tOpO = make_tensor<bool>(make_shape(size<2>(tOgO))); if (!Is_even_K) { #pragma unroll for (int k = 0; k < size(tOpO); ++k) { tOpO(k) = get<1>(tOcO(0, 0, k)) < params.d; } } // Clear_OOB_K must be false since we don't want to write zeros to gmem flash::copy<Is_even_MN, Is_even_K, /*Clear_OOB_MN=*/false, /*Clear_OOB_K=*/false>( gmem_tiled_copy_O, tOrO, tOgO, tOcO, tOpO, binfo.actual_seqlen_q - m_block * kBlockM ); } //////////////////////////////////////////////////////////////////////////////////////////////////// template<typename Kernel_traits, bool Is_causal, bool Is_local, bool Has_alibi, bool Is_even_MN, bool Is_even_K, bool Is_softcap, bool Split, bool Append_KV, typename Params> inline __device__ void compute_attn_1rowblock_splitkv(const Params &params, const int bidb, const int bidh, const int m_block, const int n_split_idx, const int num_n_splits) { using Element = typename Kernel_traits::Element; using ElementAccum = typename Kernel_traits::ElementAccum; using index_t = typename Kernel_traits::index_t; // Shared memory. extern __shared__ char smem_[]; // The thread index. 
const int tidx = threadIdx.x; constexpr int kBlockM = Kernel_traits::kBlockM; constexpr int kBlockN = Kernel_traits::kBlockN; constexpr int kHeadDim = Kernel_traits::kHeadDim; constexpr int kNWarps = Kernel_traits::kNWarps; using GmemTiledCopyO = std::conditional_t< !Split, typename Kernel_traits::GmemTiledCopyO, typename Kernel_traits::GmemTiledCopyOaccum >; using ElementO = std::conditional_t<!Split, Element, ElementAccum>; const BlockInfo</*Varlen=*/!Is_even_MN> binfo(params, bidb); // if (threadIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0) { printf("Is_even_MN = %d, is_cumulativ = %d, seqlen_k_cache = %d, actual_seqlen_k = %d\n", Is_even_MN, params.is_seqlens_k_cumulative, binfo.seqlen_k_cache, binfo.actual_seqlen_k); } // if (threadIdx.x == 0 && blockIdx.y == 1 && blockIdx.z == 0) { printf("params.knew_ptr = %p, seqlen_k_cache + seqlen_knew = %d\n", params.knew_ptr, binfo.seqlen_k_cache + (params.knew_ptr == nullptr ? 0 : params.seqlen_knew)); } if (m_block * kBlockM >= binfo.actual_seqlen_q) return; const int n_blocks_per_split = ((params.seqlen_k + kBlockN - 1) / kBlockN + num_n_splits - 1) / num_n_splits; const int n_block_min = !Is_local ? n_split_idx * n_blocks_per_split : std::max(n_split_idx * n_blocks_per_split, (m_block * kBlockM + binfo.actual_seqlen_k - binfo.actual_seqlen_q - params.window_size_left) / kBlockN); int n_block_max = std::min(cute::ceil_div(binfo.actual_seqlen_k, kBlockN), (n_split_idx + 1) * n_blocks_per_split); if (Is_causal || Is_local) { n_block_max = std::min(n_block_max, cute::ceil_div((m_block + 1) * kBlockM + binfo.actual_seqlen_k - binfo.actual_seqlen_q + params.window_size_right, kBlockN)); } if (n_block_min >= n_block_max) { // This also covers the case where n_block_max <= 0 // We exit early and write 0 to gOaccum and -inf to gLSEaccum. // Otherwise we might read OOB elements from gK and gV, // or get wrong results when we combine gOaccum from different blocks. const index_t row_offset_o = binfo.q_offset(params.o_batch_stride, params.o_row_stride, bidb) + m_block * kBlockM * params.o_row_stride + bidh * params.o_head_stride; const index_t row_offset_oaccum = (((n_split_idx * params.b + bidb) * params.h + bidh) * params.seqlen_q + m_block * kBlockM) * params.d_rounded; const index_t row_offset_lseaccum = ((n_split_idx * params.b + bidb) * params.h + bidh) * params.seqlen_q + m_block * kBlockM; Tensor gOaccum = make_tensor(make_gmem_ptr(reinterpret_cast<ElementO *>(Split ? params.oaccum_ptr : params.o_ptr) + (Split ? row_offset_oaccum : row_offset_o)), Shape<Int<kBlockM>, Int<kHeadDim>>{}, make_stride(Split ? kHeadDim : params.o_row_stride, _1{})); Tensor gLSEaccum = make_tensor(make_gmem_ptr(reinterpret_cast<ElementAccum *>(Split ? 
params.softmax_lseaccum_ptr : params.softmax_lse_ptr) + row_offset_lseaccum), Shape<Int<kBlockM>>{}, Stride<_1>{}); GmemTiledCopyO gmem_tiled_copy_Oaccum; auto gmem_thr_copy_Oaccum = gmem_tiled_copy_Oaccum.get_thread_slice(tidx); Tensor tOgOaccum = gmem_thr_copy_Oaccum.partition_D(gOaccum); Tensor tOrOaccum = make_tensor<ElementO>(shape(tOgOaccum)); clear(tOrOaccum); // Construct identity layout for sO Tensor cO = make_identity_tensor(make_shape(size<0>(gOaccum), size<1>(gOaccum))); // (BLK_M,BLK_K) -> (blk_m,blk_k) // Repeat the partitioning with identity layouts Tensor tOcO = gmem_thr_copy_Oaccum.partition_D(cO); Tensor tOpO = make_tensor<bool>(make_shape(size<2>(tOgOaccum))); if (!Is_even_K) { #pragma unroll for (int k = 0; k < size(tOpO); ++k) { tOpO(k) = get<1>(tOcO(0, 0, k)) < params.d; } } // Clear_OOB_K must be false since we don't want to write zeros to gmem flash::copy<Is_even_MN, Is_even_K, /*Clear_OOB_MN=*/false, /*Clear_OOB_K=*/false>( gmem_tiled_copy_Oaccum, tOrOaccum, tOgOaccum, tOcO, tOpO, binfo.actual_seqlen_q - m_block * kBlockM ); #pragma unroll for (int m = 0; m < size<1>(tOgOaccum); ++m) { const int row = get<0>(tOcO(0, m, 0)); if (row < binfo.actual_seqlen_q - m_block * kBlockM && get<1>(tOcO(0, m, 0)) == 0) { gLSEaccum(row) = Split ? -INFINITY : INFINITY; } } return; } // We iterate over the blocks in reverse order. This is because the last block is the only one // that needs masking when we read K and V from global memory. Moreover, iterating in reverse // might save us 1 register (we just need n_block instead of both n_block and n_block_max). // We move K and V to the last block. const int bidb_cache = params.cache_batch_idx == nullptr ? bidb : params.cache_batch_idx[bidb]; const int *block_table = params.block_table == nullptr ? nullptr : params.block_table + bidb * params.block_table_batch_stride; const int block_table_idx = block_table == nullptr ? 0 : (n_block_max - 1) * kBlockN / params.page_block_size; const int block_table_offset = block_table == nullptr ? 0 : (n_block_max - 1) * kBlockN - block_table_idx * params.page_block_size; const index_t row_offset_k = block_table == nullptr ? binfo.k_offset(params.k_batch_stride, params.k_row_stride, bidb_cache) + (n_block_max - 1) * kBlockN * params.k_row_stride + (bidh / params.h_h_k_ratio) * params.k_head_stride : block_table[block_table_idx] * params.k_batch_stride + block_table_offset * params.k_row_stride + (bidh / params.h_h_k_ratio) * params.k_head_stride; const index_t row_offset_v = block_table == nullptr ? 
binfo.k_offset(params.v_batch_stride, params.v_row_stride, bidb_cache) + (n_block_max - 1) * kBlockN * params.v_row_stride + (bidh / params.h_h_k_ratio) * params.v_head_stride : block_table[block_table_idx] * params.v_batch_stride + block_table_offset * params.v_row_stride + (bidh / params.h_h_k_ratio) * params.v_head_stride; Tensor mQ = make_tensor(make_gmem_ptr(reinterpret_cast<Element*>(params.q_ptr) + binfo.q_offset(params.q_batch_stride, params.q_row_stride, bidb)), make_shape(binfo.actual_seqlen_q, params.h, params.d), make_stride(params.q_row_stride, params.q_head_stride, _1{})); Tensor gQ = local_tile(mQ(_, bidh, _), Shape<Int<kBlockM>, Int<kHeadDim>>{}, make_coord(m_block, 0)); // (kBlockM, kHeadDim) Tensor gK = make_tensor(make_gmem_ptr(reinterpret_cast<Element *>(params.k_ptr) + row_offset_k), Shape<Int<kBlockN>, Int<kHeadDim>>{}, make_stride(params.k_row_stride, _1{})); // if (threadIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0) { printf("k_ptr = %p, row_offset_k = %d, gK_ptr = %p\n", params.k_ptr, row_offset_k, gK.data()); } Tensor gV = make_tensor(make_gmem_ptr(reinterpret_cast<Element *>(params.v_ptr) + row_offset_v), Shape<Int<kBlockN>, Int<kHeadDim>>{}, make_stride(params.v_row_stride, _1{})); Tensor sQ = make_tensor(make_smem_ptr(reinterpret_cast<Element *>(smem_)), typename Kernel_traits::SmemLayoutQ{}); Tensor sK = make_tensor(sQ.data() + size(sQ), typename Kernel_traits::SmemLayoutKV{}); Tensor sV = make_tensor(sK.data() + size(sK), typename Kernel_traits::SmemLayoutKV{}); Tensor sVt = make_tensor(sV.data(), typename Kernel_traits::SmemLayoutVtransposed{}); Tensor sVtNoSwizzle = make_tensor(sV.data().get(), typename Kernel_traits::SmemLayoutVtransposedNoSwizzle{}); typename Kernel_traits::GmemTiledCopyQKV gmem_tiled_copy_QKV; auto gmem_thr_copy_QKV = gmem_tiled_copy_QKV.get_thread_slice(tidx); Tensor tQgQ = gmem_thr_copy_QKV.partition_S(gQ); Tensor tQsQ = gmem_thr_copy_QKV.partition_D(sQ); Tensor tKgK = gmem_thr_copy_QKV.partition_S(gK); // (KCPY, KCPY_N, KCPY_K) Tensor tKsK = gmem_thr_copy_QKV.partition_D(sK); Tensor tVgV = gmem_thr_copy_QKV.partition_S(gV); // (VCPY, VCPY_N, VCPY_K) Tensor tVsV = gmem_thr_copy_QKV.partition_D(sV); typename Kernel_traits::TiledMma tiled_mma; auto thr_mma = tiled_mma.get_thread_slice(tidx); Tensor tSrQ = thr_mma.partition_fragment_A(sQ); // (MMA,MMA_M,MMA_K) Tensor tSrK = thr_mma.partition_fragment_B(sK); // (MMA,MMA_N,MMA_K) Tensor tOrVt = thr_mma.partition_fragment_B(sVtNoSwizzle); // (MMA, MMA_K,MMA_N) Tensor acc_o = partition_fragment_C(tiled_mma, Shape<Int<kBlockM>, Int<kHeadDim>>{}); // MMA, MMA_M, MMA_K // // Copy Atom retiling // auto smem_tiled_copy_Q = make_tiled_copy_A(typename Kernel_traits::SmemCopyAtom{}, tiled_mma); auto smem_thr_copy_Q = smem_tiled_copy_Q.get_thread_slice(tidx); Tensor tSsQ = smem_thr_copy_Q.partition_S(sQ); auto smem_tiled_copy_K = make_tiled_copy_B(typename Kernel_traits::SmemCopyAtom{}, tiled_mma); auto smem_thr_copy_K = smem_tiled_copy_K.get_thread_slice(tidx); Tensor tSsK = smem_thr_copy_K.partition_S(sK); auto smem_tiled_copy_V = make_tiled_copy_B(typename Kernel_traits::SmemCopyAtomTransposed{}, tiled_mma); auto smem_thr_copy_V = smem_tiled_copy_V.get_thread_slice(tidx); Tensor tOsVt = smem_thr_copy_V.partition_S(sVt); // PREDICATES // // // Allocate predicate tensors for m and n // Tensor tQpQ = make_tensor<bool>(make_shape(size<1>(tQsQ), size<2>(tQsQ)), Stride<_1,_0>{}); // Tensor tKVpKV = make_tensor<bool>(make_shape(size<1>(tKsK), size<2>(tKsK)), Stride<_1,_0>{}); // Construct identity 
layout for sQ and sK Tensor cQ = make_identity_tensor(make_shape(size<0>(sQ), size<1>(sQ))); // (BLK_M,BLK_K) -> (blk_m,blk_k) Tensor cKV = make_identity_tensor(make_shape(size<0>(sK), size<1>(sK))); // (BLK_N,BLK_K) -> (blk_n,blk_k) // Repeat the partitioning with identity layouts Tensor tQcQ = gmem_thr_copy_QKV.partition_S(cQ); // (ACPY,ACPY_M,ACPY_K) -> (blk_m,blk_k) Tensor tKVcKV = gmem_thr_copy_QKV.partition_S(cKV); // (BCPY,BCPY_N,BCPY_K) -> (blk_n,blk_k) // Allocate predicate tensors for k Tensor tQpQ = make_tensor<bool>(make_shape(size<2>(tQsQ))); Tensor tKVpKV = make_tensor<bool>(make_shape(size<2>(tKsK))); // Set predicates for k bounds if (!Is_even_K) { #pragma unroll for (int k = 0; k < size(tQpQ); ++k) { tQpQ(k) = get<1>(tQcQ(0, 0, k)) < params.d; } #pragma unroll for (int k = 0; k < size(tKVpKV); ++k) { tKVpKV(k) = get<1>(tKVcKV(0, 0, k)) < params.d; } } // Prologue // Copy from Knew to K, optionally apply rotary embedding. typename Kernel_traits::GmemTiledCopyRotcossin gmem_tiled_copy_rotary; auto gmem_thr_copy_rotary = gmem_tiled_copy_rotary.get_thread_slice(tidx); typename Kernel_traits::GmemTiledCopyRotcossinCont gmem_tiled_copy_rotary_cont; auto gmem_thr_copy_rotary_cont = gmem_tiled_copy_rotary_cont.get_thread_slice(tidx); if constexpr (Append_KV) { // Even if we have MQA / GQA, all threadblocks responsible for the same KV head are writing to // gmem. Technically it's a race condition, but they all write the same content anyway, and it's safe. // We want to do this so that all threadblocks can proceed right after they finish writing the KV cache. const index_t row_offset_cossin = ((n_block_max - 1) * kBlockN + (params.leftpad_k == nullptr ? 0 : params.leftpad_k[bidb])) * (params.rotary_dim / 2); Tensor gCos = make_tensor(make_gmem_ptr(reinterpret_cast<Element *>(params.rotary_cos_ptr) + row_offset_cossin), Shape<Int<kBlockN>, Int<kHeadDim / 2>>{}, make_stride(params.rotary_dim / 2, _1{})); Tensor gSin = make_tensor(make_gmem_ptr(reinterpret_cast<Element *>(params.rotary_sin_ptr) + row_offset_cossin), Shape<Int<kBlockN>, Int<kHeadDim / 2>>{}, make_stride(params.rotary_dim / 2, _1{})); Tensor gCosCont = make_tensor(make_gmem_ptr(reinterpret_cast<Element *>(params.rotary_cos_ptr) + row_offset_cossin), Shape<Int<kBlockN>, Int<kHeadDim>>{}, make_stride(params.rotary_dim / 2, _1{})); Tensor gSinCont = make_tensor(make_gmem_ptr(reinterpret_cast<Element *>(params.rotary_sin_ptr) + row_offset_cossin), Shape<Int<kBlockN>, Int<kHeadDim>>{}, make_stride(params.rotary_dim / 2, _1{})); Tensor tRgCos = gmem_thr_copy_rotary.partition_S(gCos); Tensor tRgSin = gmem_thr_copy_rotary.partition_S(gSin); Tensor tRgCosCont = gmem_thr_copy_rotary_cont.partition_S(gCosCont); Tensor tRgSinCont = gmem_thr_copy_rotary_cont.partition_S(gSinCont); // if (cute::thread(0, 0)) { printf("rotary_cos_ptr = %p, gCos.data() = %p, tRgCos.data() = %p, rotary_dim = %d\n", params.rotary_cos_ptr, gCos.data(), tRgCos.data(), params.rotary_dim); } // if (cute::thread(8, 0)) { print_tensor(gCos); } // if (cute::thread(0, 0)) { print_tensor(tRgCos); } // const index_t row_offset_knew = binfo.k_offset(params.knew_batch_stride, params.knew_row_stride, bidb) const index_t row_offset_knew = bidb * params.knew_batch_stride + ((n_block_max - 1) * kBlockN) * params.knew_row_stride + (bidh / params.h_h_k_ratio) * params.knew_head_stride; // const index_t row_offset_vnew = binfo.k_offset(params.vnew_batch_stride, params.vnew_row_stride, bidb) const index_t row_offset_vnew = bidb * params.vnew_batch_stride + ((n_block_max - 1) 
* kBlockN) * params.vnew_row_stride + (bidh / params.h_h_k_ratio) * params.vnew_head_stride; // Subtract seqlen_k_cache * row stride so that conceptually gK and gKnew "line up". When we access them, // e.g. if gK has 128 rows and gKnew has 64 rows, we access gK[:128] and gKNew[128:128 + 64]. // This maps to accessing the first 64 rows of knew_ptr. Tensor gKnew = make_tensor(make_gmem_ptr(reinterpret_cast<Element *>(params.knew_ptr) + row_offset_knew - binfo.seqlen_k_cache * params.knew_row_stride), Shape<Int<kBlockN>, Int<kHeadDim>>{}, make_stride(params.knew_row_stride, _1{})); // if (threadIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0) { printf("knew_ptr = %p, row_offset_knew = %d, gKnew_ptr = %p\n", params.knew_ptr, row_offset_knew, gKnew.data()); } Tensor gVnew = make_tensor(make_gmem_ptr(reinterpret_cast<Element *>(params.vnew_ptr) + row_offset_vnew - binfo.seqlen_k_cache * params.vnew_row_stride), Shape<Int<kBlockN>, Int<kHeadDim>>{}, make_stride(params.vnew_row_stride, _1{})); Tensor tKgKnew = gmem_thr_copy_QKV.partition_S(gKnew); // (KCPY, KCPY_N, KCPY_K) Tensor tVgVnew = gmem_thr_copy_QKV.partition_S(gVnew); // (VCPY, VCPY_N, VCPY_K) const int n_block_copy_min = std::max(n_block_min, binfo.seqlen_k_cache / kBlockN); auto tKgK_data = tKgK.data(); auto tVgV_data = tVgV.data(); for (int n_block = n_block_max - 1; n_block >= n_block_copy_min; n_block--) { flash::copy_w_min_idx<Is_even_K>( tVgVnew, tVgV, tKVcKV, tKVpKV, binfo.actual_seqlen_k - n_block * kBlockN, binfo.seqlen_k_cache - n_block * kBlockN ); tVgVnew.data() = tVgVnew.data() + (-int(kBlockN * params.vnew_row_stride)); if (params.rotary_dim == 0) { flash::copy_w_min_idx<Is_even_K>( tKgKnew, tKgK, tKVcKV, tKVpKV, binfo.actual_seqlen_k - n_block * kBlockN, binfo.seqlen_k_cache - n_block * kBlockN ); } else { if (params.is_rotary_interleaved) { // Don't clear OOB_K because we're writing to global memory flash::copy_rotary_interleaved<Is_even_K, /*Clear_OOB_K=*/false>( tKgKnew, tKgK, tRgCos, tRgSin, tKVcKV, binfo.actual_seqlen_k - n_block * kBlockN, binfo.seqlen_k_cache - n_block * kBlockN, params.d, params.rotary_dim ); tRgCos.data() = tRgCos.data() + (-int(kBlockN * params.rotary_dim / 2)); tRgSin.data() = tRgSin.data() + (-int(kBlockN * params.rotary_dim / 2)); } else { // Don't clear OOB_K because we're writing to global memory flash::copy_rotary_contiguous<Is_even_K, /*Clear_OOB_K=*/false>( tKgKnew, tKgK, tRgCosCont, tRgSinCont, tKVcKV, binfo.actual_seqlen_k - n_block * kBlockN, binfo.seqlen_k_cache - n_block * kBlockN, params.d, params.rotary_dim ); tRgCosCont.data() = tRgCosCont.data() + (-int(kBlockN * params.rotary_dim / 2)); tRgSinCont.data() = tRgSinCont.data() + (-int(kBlockN * params.rotary_dim / 2)); } } tKgKnew.data() = tKgKnew.data() + (-int(kBlockN * params.knew_row_stride)); if (block_table == nullptr) { tVgV.data() = tVgV.data() + (-int(kBlockN * params.v_row_stride)); tKgK.data() = tKgK.data() + (-int(kBlockN * params.k_row_stride)); } else { if (n_block > n_block_copy_min) { const int block_table_idx_cur = n_block * kBlockN / params.page_block_size; const int block_table_offset_cur = n_block * kBlockN - block_table_idx_cur * params.page_block_size; const int block_table_idx_next = (n_block - 1) * kBlockN / params.page_block_size; const int block_table_offset_next = (n_block - 1) * kBlockN - block_table_idx_next * params.page_block_size; const int table_diff = block_table[block_table_idx_next] - block_table[block_table_idx_cur]; const int offset_diff = block_table_offset_next - block_table_offset_cur; 
tVgV.data() = tVgV.data() + table_diff * params.v_batch_stride + offset_diff * params.v_row_stride; tKgK.data() = tKgK.data() + table_diff * params.k_batch_stride + offset_diff * params.k_row_stride; } } } // Need this before we can read in K again, so that we'll see the updated K values. __syncthreads(); tKgK.data() = tKgK_data; tVgV.data() = tVgV_data; } // Read Q from gmem to smem, optionally apply rotary embedding. if (!Append_KV || params.rotary_dim == 0) { // We don't need to clear the sQ smem tiles since we'll only write out the valid outputs flash::copy<Is_even_MN, Is_even_K>(gmem_tiled_copy_QKV, tQgQ, tQsQ, tQcQ, tQpQ, binfo.actual_seqlen_q - m_block * kBlockM); } else { const index_t row_offset_cossin = (binfo.seqlen_k_cache + (params.leftpad_k == nullptr ? 0 : params.leftpad_k[bidb]) + (Is_causal || Is_local ? m_block * kBlockM : 0)) * (params.rotary_dim / 2); // If not causal, all the queries get the same the cos/sin, taken at location seqlen_k_cache. // We do this by setting the row stride of gCos / gSin to 0. Tensor gCos = make_tensor(make_gmem_ptr(reinterpret_cast<Element *>(params.rotary_cos_ptr) + row_offset_cossin), Shape<Int<kBlockM>, Int<kHeadDim / 2>>{}, make_stride(Is_causal || Is_local ? params.rotary_dim / 2 : 0, _1{})); Tensor gSin = make_tensor(make_gmem_ptr(reinterpret_cast<Element *>(params.rotary_sin_ptr) + row_offset_cossin), Shape<Int<kBlockM>, Int<kHeadDim / 2>>{}, make_stride(Is_causal || Is_local ? params.rotary_dim / 2 : 0, _1{})); Tensor gCosCont = make_tensor(make_gmem_ptr(reinterpret_cast<Element *>(params.rotary_cos_ptr) + row_offset_cossin), Shape<Int<kBlockM>, Int<kHeadDim>>{}, make_stride(Is_causal || Is_local ? params.rotary_dim / 2 : 0, _1{})); Tensor gSinCont = make_tensor(make_gmem_ptr(reinterpret_cast<Element *>(params.rotary_sin_ptr) + row_offset_cossin), Shape<Int<kBlockM>, Int<kHeadDim>>{}, make_stride(Is_causal || Is_local ? params.rotary_dim / 2 : 0, _1{})); Tensor tRgCos = gmem_thr_copy_rotary.partition_S(gCos); Tensor tRgSin = gmem_thr_copy_rotary.partition_S(gSin); Tensor tRgCosCont = gmem_thr_copy_rotary_cont.partition_S(gCosCont); Tensor tRgSinCont = gmem_thr_copy_rotary_cont.partition_S(gSinCont); if (params.is_rotary_interleaved) { flash::copy_rotary_interleaved<Is_even_K>( tQgQ, tQsQ, tRgCos, tRgSin, tQcQ, binfo.actual_seqlen_q - m_block * kBlockM, 0, params.d, params.rotary_dim ); } else { flash::copy_rotary_contiguous<Is_even_K>( tQgQ, tQsQ, tRgCosCont, tRgSinCont, tQcQ, binfo.actual_seqlen_q - m_block * kBlockM, 0, params.d, params.rotary_dim ); } } int n_block = n_block_max - 1; // We don't need to clear the sK smem tiles since we'll mask out the scores anyway. flash::copy<Is_even_MN, Is_even_K>(gmem_tiled_copy_QKV, tKgK, tKsK, tKVcKV, tKVpKV, binfo.actual_seqlen_k - n_block * kBlockN); cute::cp_async_fence(); // flash::cp_async_wait<0>(); // __syncthreads(); // if (tidx == 0 && blockIdx.y == 0 && blockIdx.z == 0) { print(tKsK); } // __syncthreads(); clear(acc_o); flash::Softmax<2 * size<1>(acc_o)> softmax; const float alibi_slope = !Has_alibi ? 0.0f : reinterpret_cast<float *>(params.alibi_slopes_ptr)[bidb * params.alibi_slopes_batch_stride + bidh] / params.scale_softmax; flash::Mask<Is_causal, Is_local, Has_alibi> mask(binfo.actual_seqlen_k, binfo.actual_seqlen_q, params.window_size_left, params.window_size_right, alibi_slope); // For performance reason, we separate out two kinds of iterations: // those that need masking on S, and those that don't. 
// We need masking on S for the very last block when K and V has length not multiple of kBlockN. // We also need masking on S if it's causal, for the last ceil_div(kBlockM, kBlockN) blocks. // We will have at least 1 "masking" iteration. // If not even_N, then seqlen_k might end in the middle of a block. In that case we need to // mask 2 blocks (e.g. when kBlockM == kBlockN), not just 1. constexpr int n_masking_steps = (!Is_causal && !Is_local) ? 1 : ((Is_even_MN && Is_causal) ? cute::ceil_div(kBlockM, kBlockN) : cute::ceil_div(kBlockM, kBlockN) + 1); #pragma unroll for (int masking_step = 0; masking_step < n_masking_steps; ++masking_step, --n_block) { Tensor acc_s = partition_fragment_C(tiled_mma, Shape<Int<kBlockM>, Int<kBlockN>>{}); // (MMA=4, MMA_M, MMA_N) clear(acc_s); flash::cp_async_wait<0>(); __syncthreads(); // Advance gV if (masking_step > 0) { if (block_table == nullptr) { tVgV.data() = tVgV.data() + (-int(kBlockN * params.v_row_stride)); } else { const int block_table_idx_cur = (n_block + 1) * kBlockN / params.page_block_size; const int block_table_offset_cur = (n_block + 1) * kBlockN - block_table_idx_cur * params.page_block_size; const int block_table_idx_next = n_block * kBlockN / params.page_block_size; const int block_table_offset_next = n_block * kBlockN - block_table_idx_next * params.page_block_size; tVgV.data() = tVgV.data() + (block_table[block_table_idx_next] - block_table[block_table_idx_cur]) * params.v_batch_stride + (block_table_offset_next - block_table_offset_cur) * params.v_row_stride; } flash::copy</*Is_even_MN=*/true, Is_even_K>(gmem_tiled_copy_QKV, tVgV, tVsV, tKVcKV, tKVpKV); } else { // Clear the smem tiles to account for predicated off loads flash::copy<Is_even_MN, Is_even_K, /*Clear_OOB_MN=*/true>( gmem_tiled_copy_QKV, tVgV, tVsV, tKVcKV, tKVpKV, binfo.actual_seqlen_k - n_block * kBlockN ); } cute::cp_async_fence(); flash::gemm( acc_s, tSrQ, tSrK, tSsQ, tSsK, tiled_mma, smem_tiled_copy_Q, smem_tiled_copy_K, smem_thr_copy_Q, smem_thr_copy_K ); // if (cute::thread0()) { print(acc_s); } if constexpr (Is_softcap){ flash::apply_softcap(acc_s, params.softcap); } mask.template apply_mask<Is_causal, Is_even_MN>( acc_s, n_block * kBlockN, m_block * kBlockM + (tidx / 32) * 16 + (tidx % 32) / 4, kNWarps * 16 ); flash::cp_async_wait<0>(); __syncthreads(); // if (tidx == 0 && blockIdx.y == 0 && blockIdx.z == 0) { print(tVsV); } // __syncthreads(); if (n_block > n_block_min) { // Advance gK if (block_table == nullptr) { tKgK.data() = tKgK.data() + (-int(kBlockN * params.k_row_stride)); } else { const int block_table_idx_cur = n_block * kBlockN / params.page_block_size; const int block_table_offset_cur = n_block * kBlockN - block_table_idx_cur * params.page_block_size; const int block_table_idx_next = (n_block - 1) * kBlockN / params.page_block_size; const int block_table_offset_next =(n_block - 1) * kBlockN - block_table_idx_next * params.page_block_size; tKgK.data() = tKgK.data() + (block_table[block_table_idx_next] - block_table[block_table_idx_cur]) * params.k_batch_stride + (block_table_offset_next - block_table_offset_cur) * params.k_row_stride; } flash::copy</*Is_even_MN=*/true, Is_even_K>(gmem_tiled_copy_QKV, tKgK, tKsK, tKVcKV, tKVpKV); // This cp_async_fence needs to be in the if block, otherwise the synchronization // isn't right and we get race conditions. cute::cp_async_fence(); } // We have key_padding_mask so we'll need to Check_inf masking_step == 0 ? 
softmax.template softmax_rescale_o</*Is_first=*/true, /*Check_inf=*/Is_causal || Is_local || !Is_even_MN>(acc_s, acc_o, params.scale_softmax_log2) : softmax.template softmax_rescale_o</*Is_first=*/false, /*Check_inf=*/Is_causal || Is_local || !Is_even_MN>(acc_s, acc_o, params.scale_softmax_log2); // if (cute::thread0()) { print(scores_max); print(scores_sum); print(scores); } // Convert acc_s from fp32 to fp16/bf16 Tensor rP = flash::convert_type<Element>(acc_s); // Reshape rP from (MMA=4, MMA_M, MMA_N) to ((4, 2), MMA_M, MMA_N / 2) // if using m16n8k16 or (4, MMA_M, MMA_N) if using m16n8k8. Tensor tOrP = make_tensor(rP.data(), flash::convert_layout_acc_Aregs<Kernel_traits::TiledMma>(rP.layout())); flash::gemm_rs(acc_o, tOrP, tOrVt, tOsVt, tiled_mma, smem_tiled_copy_V, smem_thr_copy_V); // This check is at the end of the loop since we always have at least 1 iteration if (n_masking_steps > 1 && n_block <= n_block_min) { --n_block; break; } } // These are the iterations where we don't need masking on S for (; n_block >= n_block_min; --n_block) { Tensor acc_s = partition_fragment_C(tiled_mma, Shape<Int<kBlockM>, Int<kBlockN>>{}); // (MMA=4, MMA_M, MMA_N) clear(acc_s); flash::cp_async_wait<0>(); __syncthreads(); // Advance gV if (block_table == nullptr) { tVgV.data() = tVgV.data() + (-int(kBlockN * params.v_row_stride)); } else { const int block_table_idx_cur = (n_block + 1) * kBlockN / params.page_block_size; const int block_table_offset_cur = (n_block + 1) * kBlockN - block_table_idx_cur * params.page_block_size; const int block_table_idx_next = n_block * kBlockN / params.page_block_size; const int block_table_offset_next = n_block * kBlockN - block_table_idx_next * params.page_block_size; tVgV.data() = tVgV.data() + (block_table[block_table_idx_next] - block_table[block_table_idx_cur]) * params.v_batch_stride + (block_table_offset_next - block_table_offset_cur) * params.v_row_stride; } flash::copy</*Is_even_MN=*/true, Is_even_K>(gmem_tiled_copy_QKV, tVgV, tVsV, tKVcKV, tKVpKV); cute::cp_async_fence(); flash::gemm( acc_s, tSrQ, tSrK, tSsQ, tSsK, tiled_mma, smem_tiled_copy_Q, smem_tiled_copy_K, smem_thr_copy_Q, smem_thr_copy_K ); if constexpr (Is_softcap){ flash::apply_softcap(acc_s, params.softcap); } flash::cp_async_wait<0>(); __syncthreads(); if (n_block > n_block_min) { // Advance gK if (block_table == nullptr) { tKgK.data() = tKgK.data() + (-int(kBlockN * params.k_row_stride)); } else { const int block_table_idx_cur = n_block * kBlockN / params.page_block_size; const int block_table_offset_cur = n_block * kBlockN - block_table_idx_cur * params.page_block_size; const int block_table_idx_next = (n_block - 1) * kBlockN / params.page_block_size; const int block_table_offset_next = (n_block - 1) * kBlockN - block_table_idx_next * params.page_block_size; tKgK.data() = tKgK.data() + (block_table[block_table_idx_next] - block_table[block_table_idx_cur]) * params.k_batch_stride + (block_table_offset_next - block_table_offset_cur) * params.k_row_stride; } flash::copy</*Is_even_MN=*/true, Is_even_K>(gmem_tiled_copy_QKV, tKgK, tKsK, tKVcKV, tKVpKV); // This cp_async_fence needs to be in the if block, otherwise the synchronization // isn't right and we get race conditions. 
cute::cp_async_fence(); } mask.template apply_mask</*Causal_mask=*/false>( acc_s, n_block * kBlockN, m_block * kBlockM + (tidx / 32) * 16 + (tidx % 32) / 4, kNWarps * 16 ); softmax.template softmax_rescale_o</*Is_first=*/false, /*Check_inf=*/Is_local>(acc_s, acc_o, params.scale_softmax_log2); Tensor rP = flash::convert_type<Element>(acc_s); // Reshape rP from (MMA=4, MMA_M, MMA_N) to ((4, 2), MMA_M, MMA_N / 2) // if using m16n8k16 or (4, MMA_M, MMA_N) if using m16n8k8. Tensor tOrP = make_tensor(rP.data(), flash::convert_layout_acc_Aregs<Kernel_traits::TiledMma>(rP.layout())); flash::gemm_rs(acc_o, tOrP, tOrVt, tOsVt, tiled_mma, smem_tiled_copy_V, smem_thr_copy_V); } // Epilogue Tensor lse = softmax.template normalize_softmax_lse</*Is_dropout=*/false, Split>(acc_o, params.scale_softmax); // if (cute::thread0()) { print(lse); } Tensor sOaccum = make_tensor(make_smem_ptr(reinterpret_cast<ElementO *>(smem_)), typename Kernel_traits::SmemLayoutO{}); // (SMEM_M,SMEM_N) // Partition sO to match the accumulator partitioning using SmemTiledCopyO = std::conditional_t< !Split, typename Kernel_traits::SmemCopyAtomO, typename Kernel_traits::SmemCopyAtomOaccum >; auto smem_tiled_copy_Oaccum = make_tiled_copy_C(SmemTiledCopyO{}, tiled_mma); auto smem_thr_copy_Oaccum = smem_tiled_copy_Oaccum.get_thread_slice(tidx); Tensor rO = flash::convert_type<ElementO>(acc_o); Tensor taccOrOaccum = smem_thr_copy_Oaccum.retile_S(rO); // ((Atom,AtomNum), MMA_M, MMA_N) Tensor taccOsOaccum = smem_thr_copy_Oaccum.partition_D(sOaccum); // ((Atom,AtomNum),PIPE_M,PIPE_N) // sOaccum is larger than sQ, so we need to syncthreads here // TODO: allocate enough smem for sOaccum if constexpr (Split) { __syncthreads(); } cute::copy(smem_tiled_copy_Oaccum, taccOrOaccum, taccOsOaccum); const index_t row_offset_o = binfo.q_offset(params.o_batch_stride, params.o_row_stride, bidb) + m_block * kBlockM * params.o_row_stride + bidh * params.o_head_stride; const index_t row_offset_oaccum = (((n_split_idx * params.b + bidb) * params.h + bidh) * params.seqlen_q + m_block * kBlockM) * params.d_rounded; const index_t row_offset_lseaccum = (Split || !params.unpadded_lse ? ((n_split_idx * params.b + bidb) * params.h + bidh) * params.seqlen_q : bidh * params.total_q + binfo.q_offset(params.seqlen_q, 1, bidb) ) + m_block * kBlockM; Tensor gOaccum = make_tensor(make_gmem_ptr(reinterpret_cast<ElementO *>(Split ? params.oaccum_ptr : params.o_ptr) + (Split ? row_offset_oaccum : row_offset_o)), Shape<Int<kBlockM>, Int<kHeadDim>>{}, make_stride(Split ? kHeadDim : params.o_row_stride, _1{})); Tensor gLSEaccum = make_tensor(make_gmem_ptr(reinterpret_cast<ElementAccum *>(Split ? 
params.softmax_lseaccum_ptr : params.softmax_lse_ptr) + row_offset_lseaccum), Shape<Int<kBlockM>>{}, Stride<_1>{}); // if (tidx == 0) { printf("row_offset_o = %d, bidh = %d, gOaccum = %p\n", row_offset_o, bidh, gOaccum.data()); } GmemTiledCopyO gmem_tiled_copy_Oaccum; auto gmem_thr_copy_Oaccum = gmem_tiled_copy_Oaccum.get_thread_slice(tidx); Tensor tOsOaccum = gmem_thr_copy_Oaccum.partition_S(sOaccum); // ((Atom,AtomNum),ATOM_M,ATOM_N) Tensor tOgOaccum = gmem_thr_copy_Oaccum.partition_D(gOaccum); __syncthreads(); Tensor tOrOaccum = make_tensor<ElementO>(shape(tOgOaccum)); cute::copy(gmem_tiled_copy_Oaccum, tOsOaccum, tOrOaccum); Tensor caccO = make_identity_tensor(Shape<Int<kBlockM>, Int<kHeadDim>>{}); // (BLK_M,BLK_K) -> (blk_m,blk_k) Tensor taccOcO = thr_mma.partition_C(caccO); // (MMA,MMA_M,MMA_K) static_assert(decltype(size<0>(taccOcO))::value == 4); // Convert to ((2, 2), MMA_M, MMA_K) then take only the row indices. Tensor taccOcO_row = logical_divide(taccOcO, Shape<_2>{})(make_coord(0, _), _, 0); CUTE_STATIC_ASSERT_V(size(lse) == size(taccOcO_row)); // MMA_M if (get<1>(taccOcO_row(0)) == 0) { #pragma unroll for (int mi = 0; mi < size(lse); ++mi) { const int row = get<0>(taccOcO_row(mi)); if (row < binfo.actual_seqlen_q - m_block * kBlockM) { gLSEaccum(row) = lse(mi); } } } // Construct identity layout for sO Tensor cO = make_identity_tensor(make_shape(size<0>(sOaccum), size<1>(sOaccum))); // (BLK_M,BLK_K) -> (blk_m,blk_k) // Repeat the partitioning with identity layouts Tensor tOcO = gmem_thr_copy_Oaccum.partition_D(cO); // (ACPY,ACPY_M,ACPY_K) -> (blk_m,blk_k) Tensor tOpO = make_tensor<bool>(make_shape(size<2>(tOgOaccum))); if (!Is_even_K) { #pragma unroll for (int k = 0; k < size(tOpO); ++k) { tOpO(k) = get<1>(tOcO(0, 0, k)) < params.d; } } // Clear_OOB_K must be false since we don't want to write zeros to gmem flash::copy<Is_even_MN, Is_even_K, /*Clear_OOB_MN=*/false, /*Clear_OOB_K=*/false>( gmem_tiled_copy_Oaccum, tOrOaccum, tOgOaccum, tOcO, tOpO, binfo.actual_seqlen_q - m_block * kBlockM ); } //////////////////////////////////////////////////////////////////////////////////////////////////// template<typename Kernel_traits, bool Is_dropout, bool Is_causal, bool Is_local, bool Has_alibi, bool Is_even_MN, bool Is_even_K, bool Is_softcap, bool Return_softmax, typename Params> inline __device__ void compute_attn(const Params &params) { const int m_block = blockIdx.x; // The block index for the batch. const int bidb = blockIdx.y; // The block index for the head. const int bidh = blockIdx.z; // We want the fwd and bwd to generate the same dropout pattern (RNG), without restricting // them to have the same number of threads or have to traverse the attention matrix // in the same order. // In the Philox RNG, we use the offset to store the batch, head, and the lane id // (within a warp). We use the subsequence to store the location of the 16 x 32 blocks within // the attention matrix. This way, as long as we have the batch, head, and the location of // the 16 x 32 block within the attention matrix, we can generate the exact same dropout pattern. 
flash::compute_attn_1rowblock<Kernel_traits, Is_dropout, Is_causal, Is_local, Has_alibi, Is_even_MN, Is_even_K, Is_softcap, Return_softmax>(params, bidb, bidh, m_block); } //////////////////////////////////////////////////////////////////////////////////////////////////// template<typename Kernel_traits, bool Is_causal, bool Is_local, bool Has_alibi, bool Is_even_MN, bool Is_even_K, bool Is_softcap, bool Split, bool Append_KV, typename Params> inline __device__ void compute_attn_splitkv(const Params &params) { const int m_block = blockIdx.x; // The block index for the batch. const int bidb = Split ? blockIdx.z / params.h : blockIdx.y; // The block index for the head. const int bidh = Split ? blockIdx.z - bidb * params.h : blockIdx.z; const int n_split_idx = Split ? blockIdx.y : 0; const int num_n_splits = Split ? gridDim.y : 1; flash::compute_attn_1rowblock_splitkv<Kernel_traits, Is_causal, Is_local, Has_alibi, Is_even_MN, Is_even_K, Is_softcap, Split, Append_KV>(params, bidb, bidh, m_block, n_split_idx, num_n_splits); } //////////////////////////////////////////////////////////////////////////////////////////////////// template<typename Kernel_traits, int kBlockM, int Log_max_splits, bool Is_even_K, typename Params> inline __device__ void combine_attn_seqk_parallel(const Params &params) { using Element = typename Kernel_traits::Element; using ElementAccum = typename Kernel_traits::ElementAccum; using index_t = typename Kernel_traits::index_t; constexpr int kMaxSplits = 1 << Log_max_splits; constexpr int kHeadDim = Kernel_traits::kHeadDim; constexpr int kNThreads = Kernel_traits::kNThreads; static_assert(kMaxSplits <= 128, "kMaxSplits must be <= 128"); static_assert(kBlockM == 4 || kBlockM == 8 || kBlockM == 16 || kBlockM == 32, "kBlockM must be 4, 8, 16 or 32"); static_assert(kNThreads == 128, "We assume that each block has 128 threads"); // Shared memory. // kBlockM + 1 instead of kBlockM to reduce bank conflicts. __shared__ ElementAccum sLSE[kMaxSplits][kBlockM + 1]; // The thread and block index. const int tidx = threadIdx.x; const int bidx = blockIdx.x; const index_t lse_size = params.b * params.h * params.seqlen_q; const index_t row_offset_lse = bidx * kBlockM; Tensor gLSEaccum = make_tensor(make_gmem_ptr(reinterpret_cast<ElementAccum *>(params.softmax_lseaccum_ptr) + row_offset_lse), Shape<Int<kMaxSplits>, Int<kBlockM>>{}, make_stride(lse_size, _1{})); // LSE format is different depending on params.unpadded_lse and params.seqlenq_ngroups_swapped, see comment in get_lse_tile. // This tensor's layout maps row_offset_lse to {bidb, bidh, q_offset}. Tensor gLSE = make_tensor(make_gmem_ptr(reinterpret_cast<ElementAccum *>(params.softmax_lse_ptr) + row_offset_lse), Shape<Int<kBlockM>>{}, Stride<_1>{}); // This layout maps row_offset_lse to {bidh, q_offset, bidb} or {bidh, bidb, q_offset}. Layout flat_layout = make_layout(lse_size); Layout orig_layout = make_layout(make_shape(params.seqlen_q, params.h, params.b)); auto transposed_stride = params.seqlenq_ngroups_swapped ? 
make_stride(params.b, params.seqlen_q * params.b, 1) : make_stride(1, params.seqlen_q * params.b, params.seqlen_q); Layout remapped_layout = make_layout(make_shape(params.seqlen_q, params.h, params.b), transposed_stride); Layout final_layout = cute::composition(remapped_layout, cute::composition(orig_layout, flat_layout)); Tensor gLSE_unpadded = make_tensor(make_gmem_ptr(reinterpret_cast<ElementAccum *>(params.softmax_lse_ptr)), final_layout); constexpr int kNLsePerThread = (kMaxSplits * kBlockM + kNThreads - 1) / kNThreads; // Read the LSE values from gmem and store them in shared memory, then transpose them. constexpr int kRowsPerLoadLSE = kNThreads / kBlockM; #pragma unroll for (int l = 0; l < kNLsePerThread; ++l) { const int row = l * kRowsPerLoadLSE + tidx / kBlockM; const int col = tidx % kBlockM; ElementAccum lse = (row < params.num_splits && col < lse_size - bidx * kBlockM) ? gLSEaccum(row, col) : -INFINITY; if (row < kMaxSplits) { sLSE[row][col] = lse; } // if (bidx == 0 && tidx < 32) { printf("tidx = %d, row = %d, col = %d, lse = %f\n", tidx, row, col, lse); } } // if (bidx == 1 && tidx < 32) { printf("tidx = %d, row_offset_lse = %d, lse = %f\n", tidx, row_offset_lse, lse_accum(0)); } __syncthreads(); Tensor lse_accum = make_tensor<ElementAccum>(Shape<Int<kNLsePerThread>>{}); constexpr int kRowsPerLoadTranspose = std::min(kRowsPerLoadLSE, kMaxSplits); // To make sure that kMaxSplits is within 1 warp: we decide how many elements within kMaxSplits // each thread should hold. If kMaxSplits = 16, then each thread holds 2 elements (128 threads, // kBlockM rows, so each time we load we can load 128 / kBlockM rows). // constexpr int kThreadsPerSplit = kMaxSplits / kRowsPerLoadTranspose; // static_assert(kThreadsPerSplit <= 32); static_assert(kRowsPerLoadTranspose <= 32); static_assert(kNLsePerThread * kRowsPerLoadTranspose <= kMaxSplits); #pragma unroll for (int l = 0; l < kNLsePerThread; ++l) { const int row = l * kRowsPerLoadTranspose + tidx % kRowsPerLoadTranspose; const int col = tidx / kRowsPerLoadTranspose; lse_accum(l) = (row < kMaxSplits && col < kBlockM) ? sLSE[row][col] : -INFINITY; // if (bidx == 0 && tidx < 32) { printf("tidx = %d, row = %d, col = %d, lse = %f\n", tidx, row, col, lse_accum(l)); } } // Compute the logsumexp of the LSE along the split dimension. ElementAccum lse_max = lse_accum(0); #pragma unroll for (int l = 1; l < kNLsePerThread; ++l) { lse_max = max(lse_max, lse_accum(l)); } MaxOp<float> max_op; lse_max = Allreduce<kRowsPerLoadTranspose>::run(lse_max, max_op); lse_max = lse_max == -INFINITY ? 0.0f : lse_max; // In case all local LSEs are -inf float lse_sum = expf(lse_accum(0) - lse_max); #pragma unroll for (int l = 1; l < kNLsePerThread; ++l) { lse_sum += expf(lse_accum(l) - lse_max); } SumOp<float> sum_op; lse_sum = Allreduce<kRowsPerLoadTranspose>::run(lse_sum, sum_op); // For the case where all local lse == -INFINITY, we want to set lse_logsum to INFINITY. Otherwise // lse_logsum is log(0.0) = -INFINITY and we get NaN when we do lse_accum(l) - lse_logsum. ElementAccum lse_logsum = (lse_sum == 0.f || lse_sum != lse_sum) ? 
INFINITY : logf(lse_sum) + lse_max; // if (bidx == 0 && tidx < 32) { printf("tidx = %d, lse = %f, lse_max = %f, lse_logsum = %f\n", tidx, lse_accum(0), lse_max, lse_logsum); } if (tidx % kRowsPerLoadTranspose == 0 && tidx / kRowsPerLoadTranspose < kBlockM) { if (params.unpadded_lse) { const index_t lse_offset = row_offset_lse + tidx / kRowsPerLoadTranspose; if (lse_offset < lse_size) { gLSE_unpadded(lse_offset) = lse_logsum; } } else { gLSE(tidx / kRowsPerLoadTranspose) = lse_logsum; } } // Store the scales exp(lse - lse_logsum) in shared memory. #pragma unroll for (int l = 0; l < kNLsePerThread; ++l) { const int row = l * kRowsPerLoadTranspose + tidx % kRowsPerLoadTranspose; const int col = tidx / kRowsPerLoadTranspose; if (row < params.num_splits && col < kBlockM) { sLSE[row][col] = expf(lse_accum(l) - lse_logsum); } } __syncthreads(); const index_t row_offset_oaccum = bidx * kBlockM * params.d_rounded; Tensor gOaccum = make_tensor(make_gmem_ptr(reinterpret_cast<ElementAccum *>(params.oaccum_ptr) + row_offset_oaccum), Shape<Int<kBlockM>, Int<kHeadDim>>{}, Stride<Int<kHeadDim>, _1>{}); constexpr int kBlockN = kNThreads / kBlockM; using GmemLayoutAtomOaccum = Layout<Shape<Int<kBlockM>, Int<kBlockN>>, Stride<Int<kBlockN>, _1>>; using GmemTiledCopyOaccum = decltype( make_tiled_copy(Copy_Atom<AutoVectorizingCopyWithAssumedAlignment<128>, ElementAccum>{}, GmemLayoutAtomOaccum{}, Layout<Shape < _1, _4>>{})); // Val layout, 4 vals per store GmemTiledCopyOaccum gmem_tiled_copy_Oaccum; auto gmem_thr_copy_Oaccum = gmem_tiled_copy_Oaccum.get_thread_slice(tidx); Tensor tOgOaccum = gmem_thr_copy_Oaccum.partition_S(gOaccum); Tensor tOrO = make_tensor<ElementAccum>(shape(tOgOaccum)); Tensor tOrOaccum = make_tensor<ElementAccum>(shape(tOgOaccum)); clear(tOrO); // Predicates Tensor cOaccum = make_identity_tensor(Shape<Int<kBlockM>, Int<kHeadDim>>{}); // Repeat the partitioning with identity layouts Tensor tOcOaccum = gmem_thr_copy_Oaccum.partition_S(cOaccum); Tensor tOpOaccum = make_tensor<bool>(make_shape(size<2>(tOgOaccum))); if (!Is_even_K) { #pragma unroll for (int k = 0; k < size(tOpOaccum); ++k) { tOpOaccum(k) = get<1>(tOcOaccum(0, 0, k)) < params.d; } } // Load Oaccum in then scale and accumulate to O for (int split = 0; split < params.num_splits; ++split) { flash::copy</*Is_even_MN=*/false, Is_even_K>( gmem_tiled_copy_Oaccum, tOgOaccum, tOrOaccum, tOcOaccum, tOpOaccum, params.b * params.h * params.seqlen_q - bidx * kBlockM ); #pragma unroll for (int m = 0; m < size<1>(tOrOaccum); ++m) { int row = get<0>(tOcOaccum(0, m, 0)); ElementAccum lse_scale = sLSE[split][row]; #pragma unroll for (int k = 0; k < size<2>(tOrOaccum); ++k) { #pragma unroll for (int i = 0; i < size<0>(tOrOaccum); ++i) { tOrO(i, m, k) += lse_scale * tOrOaccum(i, m, k); } } // if (cute::thread0()) { printf("lse_scale = %f, %f\n", sLSE[split][0], sLSE[split][1]); print(tOrOaccum); } } tOgOaccum.data() = tOgOaccum.data() + params.b * params.h * params.seqlen_q * params.d_rounded; } // if (cute::thread0()) { print_tensor(tOrO); } Tensor rO = flash::convert_type<Element>(tOrO); // Write to gO #pragma unroll for (int m = 0; m < size<1>(rO); ++m) { const int idx = bidx * kBlockM + get<0>(tOcOaccum(0, m, 0)); if (idx < params.b * params.h * params.seqlen_q) { const int batch_idx = idx / (params.h * params.seqlen_q); const int head_idx = (idx - batch_idx * (params.h * params.seqlen_q)) / params.seqlen_q; // The index to the rows of Q const int row = idx - batch_idx * (params.h * params.seqlen_q) - head_idx * params.seqlen_q; auto o_ptr = 
reinterpret_cast<Element *>(params.o_ptr) + batch_idx * params.o_batch_stride + head_idx * params.o_head_stride + row * params.o_row_stride; #pragma unroll for (int k = 0; k < size<2>(rO); ++k) { if (Is_even_K || tOpOaccum(k)) { const int col = get<1>(tOcOaccum(0, m, k)); Tensor gO = make_tensor(make_gmem_ptr(o_ptr + col), Shape<Int<decltype(size<0>(rO))::value>>{}, Stride<_1>{}); // TODO: Should check if this is using vectorized store, but it seems pretty fast copy(rO(_, m, k), gO); // if (bidx == 0 && tidx == 0) { printf("tidx = %d, idx = %d, batch_idx = %d, head_idx = %d, row = %d, col = %d\n", tidx, idx, batch_idx, head_idx, row, col); print(rO(_, m, k)); print(gO); } // reinterpret_cast<uint64_t *>(o_ptr)[col / 4] = recast<uint64_t>(rO)(0, m, k); } } } } } } // namespace flash
candle/candle-flash-attn/kernels/flash_fwd_kernel.h/0
{ "file_path": "candle/candle-flash-attn/kernels/flash_fwd_kernel.h", "repo_id": "candle", "token_count": 37133 }
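The split-KV kernel above writes one partial output block and one log-sum-exp (LSE) per split, and `combine_attn_seqk_parallel` merges them by rescaling each partial output with `exp(lse_i - lse_total)`. Below is a minimal CPU-side sketch of just that merging math, in Rust; the function and variable names are illustrative and are not part of the kernel, and the edge-case handling only approximately mirrors the kernel's checks.

```rust
// Minimal sketch of the split-KV merge: each split i contributes a partial output
// o_i and a log-sum-exp lse_i over the keys it covered. The final row is
// sum_i exp(lse_i - lse_total) * o_i with lse_total = log(sum_i exp(lse_i)).
fn combine_splits(partial_o: &[Vec<f32>], partial_lse: &[f32]) -> (Vec<f32>, f32) {
    assert_eq!(partial_o.len(), partial_lse.len());
    let head_dim = partial_o[0].len();
    // Guard against the all -inf case (no keys seen by any split), as the kernel does.
    let lse_max = partial_lse.iter().cloned().fold(f32::NEG_INFINITY, f32::max);
    let lse_max = if lse_max == f32::NEG_INFINITY { 0.0 } else { lse_max };
    let sum_exp: f32 = partial_lse.iter().map(|&l| (l - lse_max).exp()).sum();
    let lse_total = if sum_exp == 0.0 { f32::INFINITY } else { sum_exp.ln() + lse_max };
    let mut out = vec![0.0f32; head_dim];
    for (o_i, &lse_i) in partial_o.iter().zip(partial_lse.iter()) {
        let scale = (lse_i - lse_total).exp();
        for (acc, &x) in out.iter_mut().zip(o_i.iter()) {
            *acc += scale * x;
        }
    }
    (out, lse_total)
}

fn main() {
    // Two splits of a single attention row, head_dim = 4 (made-up numbers).
    let partial_o = vec![vec![0.1f32, 0.2, 0.3, 0.4], vec![0.4, 0.3, 0.2, 0.1]];
    let partial_lse = vec![1.0f32, 2.0];
    let (o, lse) = combine_splits(&partial_o, &partial_lse);
    println!("combined O = {o:?}, combined LSE = {lse}");
}
```

The `exp(lse_i - lse_total)` factors correspond to the scales the kernel stores in `sLSE` before accumulating `tOrOaccum` into `tOrO`.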
[package]
name = "candle-kernels"
version = "0.8.2"
edition = "2021"
description = "CUDA kernels for Candle"
repository = "https://github.com/huggingface/candle"
keywords = ["blas", "tensor", "machine-learning"]
categories = ["science"]
license = "MIT OR Apache-2.0"

[dependencies]

[build-dependencies]
bindgen_cuda = "0.1.1"
candle/candle-kernels/Cargo.toml/0
{ "file_path": "candle/candle-kernels/Cargo.toml", "repo_id": "candle", "token_count": 126 }
#include "cuda_utils.cuh"
#include<stdint.h>

#define WHERE_OP(TYPENAME, ID_TYPENAME, FN_NAME) \
extern "C" __global__ void FN_NAME( \
    const size_t numel, \
    const size_t num_dims, \
    const size_t *info, \
    const ID_TYPENAME *ids, \
    const TYPENAME *t, \
    const TYPENAME *f, \
    TYPENAME *out \
) { \
    const size_t *dims = info; \
    const size_t *strides = info + num_dims; \
    const size_t *strides_t = info + 2*num_dims; \
    const size_t *strides_f = info + 3*num_dims; \
    if (is_contiguous(num_dims, dims, strides) \
        && is_contiguous(num_dims, dims, strides_f) \
        && is_contiguous(num_dims, dims, strides_t)) { \
        for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) { \
            out[i] = ids[i] ? t[i] : f[i]; \
        } \
    } \
    else { \
        for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) { \
            unsigned strided_i = get_strided_index(i, num_dims, dims, strides); \
            unsigned strided_i_t = get_strided_index(i, num_dims, dims, strides_t); \
            unsigned strided_i_f = get_strided_index(i, num_dims, dims, strides_f); \
            out[i] = ids[strided_i] ? t[strided_i_t] : f[strided_i_f]; \
        } \
    } \
} \

#if __CUDA_ARCH__ >= 800
WHERE_OP(__nv_bfloat16, int64_t, where_i64_bf16)
WHERE_OP(__nv_bfloat16, uint32_t, where_u32_bf16)
WHERE_OP(__nv_bfloat16, uint8_t, where_u8_bf16)
#endif

#if __CUDA_ARCH__ >= 530
WHERE_OP(__half, int64_t, where_i64_f16)
WHERE_OP(__half, uint32_t, where_u32_f16)
WHERE_OP(__half, uint8_t, where_u8_f16)
#endif

WHERE_OP(float, int64_t, where_i64_f32)
WHERE_OP(double, int64_t, where_i64_f64)
WHERE_OP(uint8_t, int64_t, where_i64_u8)
WHERE_OP(uint32_t, int64_t, where_i64_u32)
WHERE_OP(int64_t, int64_t, where_i64_i64)

WHERE_OP(float, uint32_t, where_u32_f32)
WHERE_OP(double, uint32_t, where_u32_f64)
WHERE_OP(uint8_t, uint32_t, where_u32_u8)
WHERE_OP(uint32_t, uint32_t, where_u32_u32)
WHERE_OP(int64_t, uint32_t, where_u32_i64)

WHERE_OP(float, uint8_t, where_u8_f32)
WHERE_OP(double, uint8_t, where_u8_f64)
WHERE_OP(uint8_t, uint8_t, where_u8_u8)
WHERE_OP(uint32_t, uint8_t, where_u8_u32)
WHERE_OP(int64_t, uint8_t, where_u8_i64)
candle/candle-kernels/src/ternary.cu/0
{ "file_path": "candle/candle-kernels/src/ternary.cu", "repo_id": "candle", "token_count": 1159 }
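For readability, here is a hypothetical CPU reference of what `WHERE_OP` computes. `get_strided_index` lives in `cuda_utils.cuh`, which is not shown here, so the Rust version below is an assumption about its behavior (row-major dims with per-tensor strides); the names and the example data are illustrative only.

```rust
// CPU reference sketch of WHERE_OP: for each logical element i, map i to a strided
// offset in each input and pick from `t` or `f` depending on the id value.
fn get_strided_index(mut i: usize, dims: &[usize], strides: &[usize]) -> usize {
    // Walk dimensions from the innermost (fastest-varying) outwards.
    let mut offset = 0;
    for (&d, &s) in dims.iter().zip(strides.iter()).rev() {
        offset += (i % d) * s;
        i /= d;
    }
    offset
}

fn where_op(
    dims: &[usize],
    ids: (&[u8], &[usize]), // (data, strides)
    t: (&[f32], &[usize]),
    f: (&[f32], &[usize]),
) -> Vec<f32> {
    let numel: usize = dims.iter().product();
    (0..numel)
        .map(|i| {
            let ii = get_strided_index(i, dims, ids.1);
            let it = get_strided_index(i, dims, t.1);
            let if_ = get_strided_index(i, dims, f.1);
            if ids.0[ii] != 0 { t.0[it] } else { f.0[if_] }
        })
        .collect()
}

fn main() {
    // 2x2 example with contiguous row-major strides [2, 1] for every input.
    let dims = [2, 2];
    let strides = [2, 1];
    let ids = [1u8, 0, 0, 1];
    let t = [1.0f32, 2.0, 3.0, 4.0];
    let f = [10.0f32, 20.0, 30.0, 40.0];
    let out = where_op(&dims, (&ids, &strides), (&t, &strides), (&f, &strides));
    assert_eq!(out, vec![1.0, 20.0, 30.0, 4.0]);
}
```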
#include <metal_stdlib> #include <metal_integer> #include <metal_atomic> using namespace metal; // Constants // 2^32 and 1/2^32. Useful for converting between float and uint. static constexpr constant ulong UNIF01_NORM32 = 4294967296; static constexpr constant float UNIF01_INV32 = 2.328306436538696289e-10; // 2 * pi static constexpr constant float TWO_PI = 2.0 * M_PI_F; static constexpr constant int3 S1 = {13, 19, 12}; static constexpr constant int3 S2 = {2, 25, 4}; static constexpr constant int3 S3 = {3, 11, 17}; // Used to prevent bad seeds. static constexpr constant uint64_t PHI[16] = { 0x9E3779B97F4A7C15, 0xF39CC0605CEDC834, 0x1082276BF3A27251, 0xF86C6A11D0C18E95, 0x2767F0B153D27B7F, 0x0347045B5BF1827F, 0x01886F0928403002, 0xC1D64BA40F335E36, 0xF06AD7AE9717877E, 0x85839D6EFFBD7DC6, 0x64D325D1C5371682, 0xCADD0CCCFDFFBBE1, 0x626E33B8D04B4331, 0xBBF73C790D94F79D, 0x471C4AB3ED3D82A5, 0xFEC507705E4AE6E5, }; // Combined Tausworthe and LCG Random Number Generator. // https://developer.nvidia.com/gpugems/gpugems3/part-vi-gpu-computing/chapter-37-efficient-random-number-generation-and-application // https://indico.cern.ch/event/93877/contributions/2118070/attachments/1104200/1575343/acat3_revised_final.pdf struct HybridTaus { float state; HybridTaus() thread = default; HybridTaus() threadgroup = default; HybridTaus() device = default; HybridTaus() constant = default; // Generate seeds for each thread. METAL_FUNC static uint4 seed_per_thread(const ulong4 seeds) { return uint4(ulong4(seeds) * ulong4(PHI[0], PHI[1], PHI[2], PHI[3]) * ulong4(1099087573UL)); } // Tausworthe generator. METAL_FUNC static uint taus(const uint z, const int3 s, const uint M) { uint b = (((z << s.x) ^ z) >> s.y); return (((z & M) << s.z) ^ b); } // LCG generator. METAL_FUNC static uint lcg(const uint z) { return (1664525 * z + 1013904223UL); } // Initialize the RNG state. METAL_FUNC static HybridTaus init(const ulong4 seeds) { uint4 seed = seed_per_thread(seeds); // Seed #1 uint z1 = taus(seed.x, S1, 4294967294UL); uint z2 = taus(seed.y, S2, 4294967288UL); uint z3 = taus(seed.z, S3, 4294967280UL); uint z4 = lcg(seed.x); // Seed #2 uint r1 = (z1^z2^z3^z4^seed.y); z1 = taus(r1, S1, 429496729UL); z2 = taus(r1, S2, 4294967288UL); z3 = taus(r1, S3, 429496280UL); z4 = lcg(r1); // Seed #3 r1 = (z1^z2^z3^z4^seed.z); z1 = taus(r1, S1, 429496729UL); z2 = taus(r1, S2, 4294967288UL); z3 = taus(r1, S3, 429496280UL); z4 = lcg(r1); // Seed #4 r1 = (z1^z2^z3^z4^seed.w); z1 = taus(r1, S1, 429496729UL); z2 = taus(r1, S2, 4294967288UL); z3 = taus(r1, S3, 429496280UL); z4 = lcg(r1); HybridTaus rng; rng.state = (z1^z2^z3^z4) * UNIF01_INV32; return rng; } METAL_FUNC float rand() { uint seed = this->state * UNIF01_NORM32; uint z1 = taus(seed, S1, 429496729UL); uint z2 = taus(seed, S2, 4294967288UL); uint z3 = taus(seed, S3, 429496280UL); uint z4 = lcg(seed); thread float result = this->state; this->state = (z1^z2^z3^z4) * UNIF01_INV32; return result; } }; template<typename T> METAL_FUNC void rand_uniform( constant size_t &size, constant float &min, constant float &max, device atomic_uint *seed, device T *out, uint tid [[thread_position_in_grid]] ) { if (tid >= size) { return; } // Evenly sized vectors need an offset when writing the mirror element. 
uint off = 1 - size % 2; float diff = abs(min - max); uint s = atomic_load_explicit(seed, memory_order_relaxed); HybridTaus rng = HybridTaus::init({ulong(s), tid, 1, 1}); out[tid] = static_cast<T>(rng.rand() * diff + min); if (tid == 0) { atomic_store_explicit(seed, uint(rng.rand() * UNIF01_NORM32), memory_order_relaxed); // Return early if tid == 0 && off == 0, otherwise we will write to out[size]. if (off == 0) return; } // Use symmetry to fill the other half of the array. out[size - off - tid] = static_cast<T>(rng.rand() * diff + min); } // Create Gaussian normal distribution using Box-Muller transform: // https://en.wikipedia.org/wiki/Box–Muller_transform template<typename T> METAL_FUNC void normal( constant size_t &size, constant float &mean, constant float &stddev, device atomic_uint *seed, device T *out, uint tid [[thread_position_in_grid]] ) { if (tid >= size) { return; } // Evenly sized vectors need an offset when writing the mirror element. uint off = 1 - size % 2; uint s = atomic_load_explicit(seed, memory_order_relaxed); HybridTaus rng = HybridTaus::init({ulong(s), tid, 1, 1}); float u1 = rng.rand(); float u2 = rng.rand(); float cosval; float sinval = sincos(TWO_PI * u2, cosval); float mag = stddev * sqrt(-2.0 * log(u1)); float z0 = mag * cosval + mean; float z1 = mag * sinval + mean; out[tid] = static_cast<T>(z0); if (tid == 0) { atomic_store_explicit(seed, uint(rng.rand() * UNIF01_NORM32), memory_order_relaxed); // Return early if tid == 0 && off == 0, otherwise we will write to out[size]. if (off == 0) return; } // Use symmetry to fill the other half of the array. out[size - off - tid] = static_cast<T>(z1); } #define UNIFORM_OP(NAME, T) \ kernel void rand_uniform_##NAME( \ constant size_t &size, \ constant float &min, \ constant float &max, \ device atomic_uint *seed, \ device T *out, \ uint tid [[thread_position_in_grid]] \ ) { \ rand_uniform<T>(size, min, max, seed, out, tid); \ } \ #define NORMAL_OP(NAME, T) \ kernel void rand_normal_##NAME( \ constant size_t &size, \ constant float &mean, \ constant float &stddev, \ device atomic_uint *seed, \ device T *out, \ uint tid [[thread_position_in_grid]] \ ) { \ normal<T>(size, mean, stddev, seed, out, tid); \ } \ #define RANDOM_OPS(NAME, T) \ UNIFORM_OP(NAME, T) \ NORMAL_OP(NAME, T) \ RANDOM_OPS(f32, float) RANDOM_OPS(f16, half) #if __METAL_VERSION__ >= 310 RANDOM_OPS(bf16, bfloat) #endif
candle/candle-metal-kernels/src/random.metal/0
{ "file_path": "candle/candle-metal-kernels/src/random.metal", "repo_id": "candle", "token_count": 3671 }
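The `normal` kernel pairs the HybridTaus generator with the Box-Muller transform referenced in its comment. Below is a minimal CPU-side Rust sketch of just the Box-Muller step, with fixed uniforms standing in for the RNG output; values and names are illustrative.

```rust
use std::f32::consts::PI;

// Box-Muller: two uniforms in (0, 1] become two independent normal samples.
fn box_muller(u1: f32, u2: f32, mean: f32, stddev: f32) -> (f32, f32) {
    let mag = stddev * (-2.0 * u1.ln()).sqrt();
    let z0 = mag * (2.0 * PI * u2).cos() + mean;
    let z1 = mag * (2.0 * PI * u2).sin() + mean;
    (z0, z1)
}

fn main() {
    // The Metal kernel writes z0 at `tid` and z1 at the mirrored index
    // `size - off - tid`; this pair corresponds to one such (tid, mirror) write.
    let (z0, z1) = box_muller(0.37, 0.81, 0.0, 1.0);
    println!("z0 = {z0}, z1 = {z1}");
}
```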
//! Layer Normalization. //! //! This layer applies Layer Normalization over a mini-batch of inputs as described in [`Layer //! Normalization`]. The input is expected to have three dimensions: a batch dimension, a length, //! and a hidden size, the normalization is applied over the last dimension. //! //! # Example //! //! ```rust //! use candle::{Tensor, Device::Cpu, test_utils::to_vec3_round}; //! use candle_nn::{LayerNorm, Module}; //! # fn main() -> candle::Result<()> { //! //! let w = Tensor::new(&[1f32, 1f32, 1f32], &Cpu)?; //! let b = Tensor::new(&[0f32, 0f32, 0f32], &Cpu)?; //! let layer = LayerNorm::new(w, b, 1e-5); //! //! let xs = Tensor::new( //! &[[[1f32, 2., 3.], [4., 5., 6.], [9., 8., 7.]]], //! &Cpu)?; //! let ys = layer.forward(&xs)?; //! assert_eq!( //! to_vec3_round(&ys, 4)?, //! &[[[-1.2247, 0.0, 1.2247], //! [-1.2247, 0.0, 1.2247], //! [ 1.2247, 0.0, -1.2247]]]); //! # Ok(()) } //! ``` //! //! [`Layer Normalization`]: https://arxiv.org/abs/1607.06450 use candle::{DType, Module, Result, Tensor, D}; #[derive(Debug, Clone, Copy, PartialEq)] pub struct LayerNormConfig { pub eps: f64, /// Whether to remove the mean or not, the default is true and when set to false, this turns /// this layer into RmsNorm. pub remove_mean: bool, pub affine: bool, } impl Default for LayerNormConfig { fn default() -> Self { Self { eps: 1e-5, remove_mean: true, affine: true, } } } impl From<f64> for LayerNormConfig { fn from(eps: f64) -> Self { Self { eps, remove_mean: true, affine: true, } } } // This layer norm version handles both weight and bias so removes the mean. #[derive(Clone, Debug)] pub struct LayerNorm { weight: Tensor, bias: Option<Tensor>, remove_mean: bool, eps: f64, } impl LayerNorm { pub fn new(weight: Tensor, bias: Tensor, eps: f64) -> Self { Self { weight, bias: Some(bias), remove_mean: true, eps, } } pub fn new_no_bias(weight: Tensor, eps: f64) -> Self { Self { weight, bias: None, remove_mean: true, eps, } } pub fn rms_norm(weight: Tensor, eps: f64) -> Self { Self { weight, bias: None, remove_mean: false, eps, } } pub fn weight(&self) -> &Tensor { &self.weight } pub fn bias(&self) -> Option<&Tensor> { self.bias.as_ref() } } impl Module for LayerNorm { fn forward(&self, x: &Tensor) -> Result<Tensor> { if x.is_contiguous() && self.remove_mean { if let Some(bias) = self.bias.as_ref() { return crate::ops::layer_norm(x, &self.weight, bias, self.eps as f32); } } let x_dtype = x.dtype(); let internal_dtype = match x_dtype { DType::F16 | DType::BF16 => DType::F32, d => d, }; let hidden_size = x.dim(D::Minus1)?; let x = x.to_dtype(internal_dtype)?; let x = if self.remove_mean { let mean_x = (x.sum_keepdim(D::Minus1)? / hidden_size as f64)?; x.broadcast_sub(&mean_x)? } else { x }; let norm_x = (x.sqr()?.sum_keepdim(D::Minus1)? / hidden_size as f64)?; let x_normed = x.broadcast_div(&(norm_x + self.eps)?.sqrt()?)?; let x = x_normed.to_dtype(x_dtype)?.broadcast_mul(&self.weight)?; match &self.bias { None => Ok(x), Some(bias) => x.broadcast_add(bias), } } } pub fn layer_norm<C: Into<LayerNormConfig>>( size: usize, config: C, vb: crate::VarBuilder, ) -> Result<LayerNorm> { let config = config.into(); let weight = vb.get_with_hints(size, "weight", crate::Init::Const(1.))?; let bias = if config.affine { Some(vb.get_with_hints(size, "bias", crate::Init::Const(0.))?) 
} else { None }; Ok(LayerNorm { weight, bias, remove_mean: config.remove_mean, eps: config.eps, }) } pub fn layer_norm_no_bias(size: usize, eps: f64, vb: crate::VarBuilder) -> Result<LayerNorm> { let config = LayerNormConfig { eps, remove_mean: true, affine: false, }; layer_norm(size, config, vb) } /// RmsNorm is a specialized version of the LayerNorm module. #[derive(Clone, Debug)] pub struct RmsNorm(LayerNorm); impl RmsNorm { pub fn new(weight: Tensor, eps: f64) -> Self { Self(LayerNorm::rms_norm(weight, eps)) } pub fn into_inner(self) -> LayerNorm { self.0 } /// Faster variant of the forward kernel, this can only be used on contiguous tensors though. pub fn forward_diff(&self, xs: &Tensor) -> Result<Tensor> { self.0.forward(xs) } } impl Module for RmsNorm { fn forward(&self, xs: &Tensor) -> Result<Tensor> { if xs.is_contiguous() { crate::ops::rms_norm(xs, &self.0.weight, self.0.eps as f32) } else { self.0.forward(xs) } } } pub fn rms_norm(size: usize, eps: f64, vb: crate::VarBuilder) -> Result<RmsNorm> { let config = LayerNormConfig { eps, remove_mean: false, affine: false, }; Ok(RmsNorm(layer_norm(size, config, vb)?)) }
candle/candle-nn/src/layer_norm.rs/0
{ "file_path": "candle/candle-nn/src/layer_norm.rs", "repo_id": "candle", "token_count": 2656 }
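The module above also defines `RmsNorm`, which only rescales by the root mean square and never subtracts the mean. A usage sketch in the same style as the `LayerNorm` doc example, assuming `RmsNorm` is re-exported at the crate root like `LayerNorm`:

```rust
use candle::{Device::Cpu, Tensor};
use candle_nn::{Module, RmsNorm};

fn main() -> candle::Result<()> {
    let w = Tensor::new(&[1f32, 1f32, 1f32], &Cpu)?;
    let layer = RmsNorm::new(w, 1e-5);
    // [1, 2, 3] is divided by its rms (about 2.16), giving roughly [0.46, 0.93, 1.39];
    // the constant row [4, 4, 4] becomes [1, 1, 1], whereas LayerNorm would map it to ~0.
    let xs = Tensor::new(&[[[1f32, 2., 3.], [4., 4., 4.]]], &Cpu)?;
    let ys = layer.forward(&xs)?;
    println!("{ys}");
    Ok(())
}
```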
use candle::{Result, Shape, Tensor}; use candle_nn::encoding::one_hot; #[test] fn test_i64_one_hot() -> Result<()> { let device = candle::Device::Cpu; let indices = Tensor::new(vec![vec![0i64, 2], vec![1, -1]], &device)?; let depth = 4; let on_value = 1.0; let off_value = 0.0; let one_hot = one_hot::<f32>(indices, depth, on_value, off_value)?; let expected_matrix = [ [[1., 0., 0., 0.], [0., 0., 1., 0.]], [[0., 1., 0., 0.], [0., 0., 0., 0.]], ]; assert_eq!(one_hot.shape(), &Shape::from((2, 2, depth))); let matrix = one_hot.to_vec3::<f32>()?; assert_eq!(matrix, expected_matrix); Ok(()) } #[test] fn test_rank_3_one_hot() -> Result<()> { let device = candle::Device::Cpu; let indices = Tensor::new( vec![ vec![vec![0i64, 1], vec![2, 3]], vec![vec![3, 1], vec![1, -1]], ], &device, )?; let depth = 4; let on_value = 1.0; let off_value = 0.0; let one_hot = one_hot::<f32>(indices, depth, on_value, off_value)?; let expected_matrix = Tensor::new( vec![ vec![ vec![vec![1f32, 0., 0., 0.], vec![0., 1., 0., 0.]], vec![vec![0., 0., 1., 0.], vec![0., 0., 0., 1.]], ], vec![ vec![vec![0., 0., 0., 1.], vec![0., 1., 0., 0.]], vec![vec![0., 1., 0., 0.], vec![0., 0., 0., 0.]], ], ], &device, )?; assert_eq!(one_hot.shape(), expected_matrix.shape()); assert_eq!(one_hot.dims(), expected_matrix.dims()); let matrix = one_hot.get(1)?.to_vec3::<f32>()?; let expected_matrix = expected_matrix.get(1)?.to_vec3::<f32>()?; assert_eq!(matrix, expected_matrix); Ok(()) } #[test] fn test_u8_one_cold() -> Result<()> { let device = candle::Device::Cpu; let depth = 4; let indices = Tensor::new(vec![vec![0i64, 2], vec![1, -1]], &device)?; let on_value = 0u8; let off_value = 1; // Note that the method does not require the turbofish operator, as the type is inferred from the on_value. let one_cold = one_hot(indices, depth, on_value, off_value)?; let expected_matrix = [[[0, 1, 1, 1], [1, 1, 0, 1]], [[1, 0, 1, 1], [1, 1, 1, 1]]]; assert_eq!(one_cold.shape(), &Shape::from((2, 2, depth))); let matrix = one_cold.to_vec3::<u8>()?; assert_eq!(matrix, expected_matrix); Ok(()) } #[test] fn test_iter() -> Result<()> { let device = candle::Device::Cpu; let depth = 4; let indices = Tensor::new(vec![vec![0i64, 2], vec![1, -1]], &device)?; let matrix = indices.to_vec2::<i64>()?; let (dim1, dim2) = indices.dims2()?; let iter = (0..dim1).flat_map(|i| (0..dim2).map(move |j| (i, j))); let mut v = vec![0; depth * dim1 * dim2]; for (i, j) in iter { let idx = i * depth * dim2 + j * depth; v[idx] = matrix[i][j]; } for (i, row) in matrix.iter().enumerate() { for (j, &value) in row.iter().enumerate() { let idx = i * depth * dim2 + j * depth; assert_eq!(v[idx], value); } } Ok(()) }
candle/candle-nn/tests/one_hot.rs/0
{ "file_path": "candle/candle-nn/tests/one_hot.rs", "repo_id": "candle", "token_count": 1592 }
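The behaviour these tests pin down fits in a few lines of plain Rust. Out-of-range positive indices are not exercised above, so treating them the same as negative ones here is an assumption; names are illustrative.

```rust
// One row of a one-hot (or one-cold) expansion: `depth` copies of `off_value`
// with `on_value` at the index position; index -1 leaves the whole row at `off_value`.
fn one_hot_row(index: i64, depth: usize, on_value: f32, off_value: f32) -> Vec<f32> {
    let mut row = vec![off_value; depth];
    if index >= 0 && (index as usize) < depth {
        row[index as usize] = on_value;
    }
    row
}

fn main() {
    assert_eq!(one_hot_row(2, 4, 1.0, 0.0), vec![0.0, 0.0, 1.0, 0.0]);
    assert_eq!(one_hot_row(-1, 4, 1.0, 0.0), vec![0.0, 0.0, 0.0, 0.0]);
    println!("one-hot semantics check passed");
}
```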
from typing import Union, Sequence class Tensor: """ This contains the type hints for the magic methodes of the `candle.Tensor` class. """ def __add__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor": """ Add a scalar to a tensor or two tensors together. """ pass def __radd__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor": """ Add a scalar to a tensor or two tensors together. """ pass def __sub__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor": """ Subtract a scalar from a tensor or one tensor from another. """ pass def __truediv__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor": """ Divide a tensor by a scalar or one tensor by another. """ pass def __mul__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor": """ Multiply a tensor by a scalar or one tensor by another. """ pass def __rmul__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor": """ Multiply a tensor by a scalar or one tensor by another. """ pass def __richcmp__(self, rhs: Union["Tensor", "Scalar"], op) -> "Tensor": """ Compare a tensor with a scalar or one tensor with another. """ pass def __getitem__(self, index: Union["Index", "Tensor", Sequence["Index"]]) -> "Tensor": """ Return a slice of a tensor. """ pass def __eq__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor": """ Compare a tensor with a scalar or one tensor with another. """ pass def __ne__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor": """ Compare a tensor with a scalar or one tensor with another. """ pass def __lt__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor": """ Compare a tensor with a scalar or one tensor with another. """ pass def __le__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor": """ Compare a tensor with a scalar or one tensor with another. """ pass def __gt__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor": """ Compare a tensor with a scalar or one tensor with another. """ pass def __ge__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor": """ Compare a tensor with a scalar or one tensor with another. """ pass
candle/candle-pyo3/_additional_typing/__init__.py/0
{ "file_path": "candle/candle-pyo3/_additional_typing/__init__.py", "repo_id": "candle", "token_count": 1174 }
import candle from candle import Tensor from candle.nn import Linear def test_linear_layer_can_be_constructed(): linear = Linear(10, 10) assert linear is not None def test_linear_layer_can_forward_a_singular_input(): linear = Linear(384, 1536) input_tensor = candle.randn((8, 384)) output = linear.forward(input_tensor) assert output.shape == (8, 1536) def test_linear_layer_can_forward_a_batched_input(): linear = Linear(384, 1536) input_tensor = candle.randn((16, 8, 384)) output = linear.forward(input_tensor) assert output.shape == (16, 8, 1536) def test_quantized_linear_layer_can_forward_a_singular_input(): linear = Linear(384, 1536) linear.weight = linear.weight.quantize("q4_0") input_tensor = candle.randn((8, 384)) output = linear.forward(input_tensor) assert output.shape == (8, 1536) def test_quantized_linear_layer_can_forward_a_batched_input(): linear = Linear(384, 1536) linear.weight = linear.weight.quantize("q4_0") input_tensor = candle.randn((16, 8, 384)) output = linear.forward(input_tensor) assert output.shape == (16, 8, 1536)
candle/candle-pyo3/tests/bindings/test_linear.py/0
{ "file_path": "candle/candle-pyo3/tests/bindings/test_linear.py", "repo_id": "candle", "token_count": 431 }
//! Implementation of DistilBert, a distilled version of BERT. //! //! See: //! - ["DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter"](https://arxiv.org/abs/1910.01108) //! use super::with_tracing::{layer_norm, linear, LayerNorm, Linear}; use candle::{DType, Device, Result, Tensor}; use candle_nn::{Embedding, Module, VarBuilder}; use serde::Deserialize; pub const DTYPE: DType = DType::F32; fn masked_fill(on_false: &Tensor, mask: &Tensor, on_true: f32) -> Result<Tensor> { let shape = mask.shape(); let on_true = Tensor::new(on_true, on_false.device())?.broadcast_as(shape.dims())?; let m = mask.where_cond(&on_true, on_false)?; Ok(m) } #[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize)] #[serde(rename_all = "lowercase")] enum HiddenAct { Gelu, Relu, } struct HiddenActLayer { act: HiddenAct, span: tracing::Span, } impl HiddenActLayer { fn new(act: HiddenAct) -> Self { let span = tracing::span!(tracing::Level::TRACE, "hidden-act"); Self { act, span } } } impl Module for HiddenActLayer { fn forward(&self, xs: &Tensor) -> candle::Result<Tensor> { let _enter = self.span.enter(); match self.act { // https://github.com/huggingface/transformers/blob/cd4584e3c809bb9e1392ccd3fe38b40daba5519a/src/transformers/activations.py#L213 HiddenAct::Gelu => xs.gelu(), HiddenAct::Relu => xs.relu(), } } } #[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize, Default)] #[serde(rename_all = "lowercase")] enum PositionEmbeddingType { #[default] Absolute, } #[derive(Debug, Clone, PartialEq, Deserialize)] pub struct Config { vocab_size: usize, dim: usize, n_layers: usize, n_heads: usize, hidden_dim: usize, activation: HiddenAct, max_position_embeddings: usize, initializer_range: f64, pad_token_id: usize, #[serde(default)] position_embedding_type: PositionEmbeddingType, #[serde(default)] use_cache: bool, model_type: Option<String>, } impl Default for Config { fn default() -> Self { Self { vocab_size: 30522, dim: 768, n_layers: 12, n_heads: 12, hidden_dim: 3072, activation: HiddenAct::Gelu, max_position_embeddings: 512, initializer_range: 0.02, pad_token_id: 0, position_embedding_type: PositionEmbeddingType::Absolute, use_cache: true, model_type: Some("distilbert".to_string()), } } } struct Embeddings { word_embeddings: Embedding, position_embeddings: Embedding, layer_norm: LayerNorm, span: tracing::Span, } impl Embeddings { fn load(vb: VarBuilder, config: &Config) -> Result<Self> { let word_embeddings = candle_nn::embedding(config.vocab_size, config.dim, vb.pp("word_embeddings"))?; let position_embeddings = candle_nn::embedding( config.max_position_embeddings, config.dim, vb.pp("position_embeddings"), )?; let layer_norm = layer_norm(config.dim, 1e-12, vb.pp("LayerNorm"))?; Ok(Self { word_embeddings, position_embeddings, layer_norm, span: tracing::span!(tracing::Level::TRACE, "embeddings"), }) } fn forward(&self, input_ids: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let (_bsize, seq_len) = input_ids.dims2()?; let input_embeddings = self.word_embeddings.forward(input_ids)?; let position_ids = (0..seq_len as u32).collect::<Vec<_>>(); let position_ids = Tensor::new(&position_ids[..], input_ids.device())?; let embeddings = input_embeddings.broadcast_add(&self.position_embeddings.forward(&position_ids)?)?; let embeddings = self.layer_norm.forward(&embeddings)?; Ok(embeddings) } } struct MultiHeadSelfAttention { q_lin: Linear, k_lin: Linear, v_lin: Linear, out_lin: Linear, n_heads: usize, attention_head_size: usize, span: tracing::Span, } impl MultiHeadSelfAttention { fn 
load(vb: VarBuilder, config: &Config) -> Result<Self> { let attention_head_size = config.dim / config.n_heads; let all_head_size = config.n_heads * attention_head_size; let dim = config.dim; let q_lin = linear(dim, all_head_size, vb.pp("q_lin"))?; let v_lin = linear(dim, all_head_size, vb.pp("v_lin"))?; let k_lin = linear(dim, all_head_size, vb.pp("k_lin"))?; let out_lin = linear(all_head_size, dim, vb.pp("out_lin"))?; Ok(Self { q_lin, k_lin, v_lin, out_lin, n_heads: config.n_heads, attention_head_size, span: tracing::span!(tracing::Level::TRACE, "attention"), }) } } impl MultiHeadSelfAttention { fn forward(&self, hidden_states: &Tensor, attention_mask: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let (bs, q_length, _dim) = hidden_states.dims3()?; let dim_per_head = self.attention_head_size; let q = self.q_lin.forward(hidden_states)?; let k = self.k_lin.forward(hidden_states)?; let v = self.v_lin.forward(hidden_states)?; let q = q .reshape((bs, q_length, self.n_heads, dim_per_head))? .transpose(1, 2)?; let k = k .reshape((bs, q_length, self.n_heads, dim_per_head))? .transpose(1, 2)?; let v = v .reshape((bs, q_length, self.n_heads, dim_per_head))? .transpose(1, 2)?; let q: Tensor = (q / (dim_per_head as f64).sqrt())?; let scores = q.matmul(&k.transpose(2, 3)?.contiguous()?)?; let mask = attention_mask.broadcast_as(scores.shape())?; let scores = masked_fill(&scores.to_dtype(DType::F32)?, &mask, f32::NEG_INFINITY)?; let weights = candle_nn::ops::softmax(&scores, candle::D::Minus1)?; let context = weights.matmul(&v.contiguous()?)?; let context = context .transpose(1, 2)? .reshape((bs, q_length, self.n_heads * dim_per_head))? .contiguous()?; let context = self.out_lin.forward(&context)?; Ok(context) } } #[allow(clippy::upper_case_acronyms)] struct FFN { lin1: Linear, lin2: Linear, activation: HiddenActLayer, span: tracing::Span, } impl FFN { fn load(vb: VarBuilder, config: &Config) -> Result<Self> { let lin1 = linear(config.dim, config.hidden_dim, vb.pp("lin1"))?; let lin2 = linear(config.hidden_dim, config.dim, vb.pp("lin2"))?; Ok(Self { lin1, lin2, activation: HiddenActLayer::new(config.activation), span: tracing::span!(tracing::Level::TRACE, "ffn"), }) } } impl Module for FFN { fn forward(&self, hidden_states: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); hidden_states .apply(&self.lin1)? .apply(&self.activation)? .apply(&self.lin2) } } struct TransformerBlock { attention: MultiHeadSelfAttention, sa_layer_norm: LayerNorm, ffn: FFN, output_layer_norm: LayerNorm, span: tracing::Span, } impl TransformerBlock { fn load(vb: VarBuilder, config: &Config) -> Result<Self> { let attention = MultiHeadSelfAttention::load(vb.pp("attention"), config)?; let sa_layer_norm = layer_norm(config.dim, 1e-12, vb.pp("sa_layer_norm"))?; let ffn = FFN::load(vb.pp("ffn"), config)?; let output_layer_norm = layer_norm(config.dim, 1e-12, vb.pp("output_layer_norm"))?; Ok(Self { attention, sa_layer_norm, ffn, output_layer_norm, span: tracing::span!(tracing::Level::TRACE, "layer"), }) } } impl TransformerBlock { fn forward(&self, hidden_states: &Tensor, attention_mask: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let sa_output = self.attention.forward(hidden_states, attention_mask)?; // TODO: Support cross-attention? // https://github.com/huggingface/transformers/blob/6eedfa6dd15dc1e22a55ae036f681914e5a0d9a1/src/transformers/models/bert/modeling_bert.py#L523 // TODO: Support something similar to `apply_chunking_to_forward`? 
let sa_output = sa_output.broadcast_add(hidden_states)?; let sa_output = self.sa_layer_norm.forward(&sa_output)?; let ffn_output = self.ffn.forward(&sa_output)?; let ffn_output = (&ffn_output + sa_output)?; let output = self.output_layer_norm.forward(&ffn_output)?; Ok(output) } } // https://github.com/huggingface/transformers/blob/6eedfa6dd15dc1e22a55ae036f681914e5a0d9a1/src/transformers/models/bert/modeling_bert.py#L556 struct Transformer { layers: Vec<TransformerBlock>, span: tracing::Span, } impl Transformer { fn load(vb: VarBuilder, config: &Config) -> Result<Self> { let layers = (0..config.n_layers) .map(|index| TransformerBlock::load(vb.pp(format!("layer.{index}")), config)) .collect::<Result<Vec<_>>>()?; let span = tracing::span!(tracing::Level::TRACE, "encoder"); Ok(Transformer { layers, span }) } } impl Transformer { fn forward(&self, hidden_states: &Tensor, attention_mask: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let mut hidden_states = hidden_states.clone(); // Use a loop rather than a fold as it's easier to modify when adding debug/... for layer in self.layers.iter() { hidden_states = layer.forward(&hidden_states, attention_mask)?; } Ok(hidden_states) } } pub struct DistilBertModel { embeddings: Embeddings, transformer: Transformer, pub device: Device, span: tracing::Span, } impl DistilBertModel { pub fn load(vb: VarBuilder, config: &Config) -> Result<Self> { let (embeddings, transformer) = match ( Embeddings::load(vb.pp("embeddings"), config), Transformer::load(vb.pp("transformer"), config), ) { (Ok(embeddings), Ok(encoder)) => (embeddings, encoder), (Err(err), _) | (_, Err(err)) => { if let Some(model_type) = &config.model_type { if let (Ok(embeddings), Ok(encoder)) = ( Embeddings::load(vb.pp(format!("{model_type}.embeddings")), config), Transformer::load(vb.pp(format!("{model_type}.transformer")), config), ) { (embeddings, encoder) } else { return Err(err); } } else { return Err(err); } } }; Ok(Self { embeddings, transformer, device: vb.device().clone(), span: tracing::span!(tracing::Level::TRACE, "model"), }) } pub fn forward(&self, input_ids: &Tensor, attention_mask: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let embedding_output = self.embeddings.forward(input_ids)?; let sequence_output = self .transformer .forward(&embedding_output, attention_mask)?; Ok(sequence_output) } }
candle/candle-transformers/src/models/distilbert.rs/0
{ "file_path": "candle/candle-transformers/src/models/distilbert.rs", "repo_id": "candle", "token_count": 5451 }
//! Helium inference implementation. //! //! See the model card on Hugging Face's [hub](https://huggingface.co/kmhf/helium-2b). use super::with_tracing::{linear_b as linear, Linear, RmsNorm}; use candle::{DType, Device, Result, Tensor, D}; use candle_nn::{Module, VarBuilder}; use std::sync::Arc; fn default_use_flash_attn() -> bool { false } #[derive(Debug, Clone, serde::Deserialize)] pub struct Config { pub attention_bias: bool, pub bos_token_id: u32, pub eos_token_id: u32, pub head_dim: usize, pub hidden_act: candle_nn::Activation, pub hidden_size: usize, pub intermediate_size: usize, pub max_position_embeddings: usize, pub mlp_bias: bool, pub num_attention_heads: usize, pub num_hidden_layers: usize, pub num_key_value_heads: usize, pub rms_norm_eps: f64, pub rope_theta: f64, pub tie_word_embeddings: bool, pub vocab_size: usize, #[serde(default = "default_use_flash_attn")] pub use_flash_attn: bool, } impl Config { pub fn config_2b(use_flash_attn: bool) -> Self { Self { attention_bias: false, bos_token_id: 1, eos_token_id: 2, head_dim: 128, hidden_act: candle_nn::Activation::Silu, hidden_size: 2560, intermediate_size: 7040, max_position_embeddings: 4096, mlp_bias: false, num_attention_heads: 20, num_hidden_layers: 24, num_key_value_heads: 20, rms_norm_eps: 1e-08, rope_theta: 100000.0, tie_word_embeddings: false, vocab_size: 48000, use_flash_attn, } } } #[derive(Debug, Clone)] struct RotaryEmbedding { sin: Tensor, cos: Tensor, } impl RotaryEmbedding { fn new(dtype: DType, cfg: &Config, dev: &Device) -> Result<Self> { let rope_theta = cfg.rope_theta as f32; let dim = cfg.head_dim; let max_seq_len = cfg.max_position_embeddings; let inv_freq: Vec<_> = (0..dim) .step_by(2) .map(|i| 1f32 / rope_theta.powf(i as f32 / dim as f32)) .collect(); let inv_freq_len = inv_freq.len(); let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(DType::F32)?; let t = Tensor::arange(0u32, max_seq_len as u32, dev)? .to_dtype(DType::F32)? 
.reshape((max_seq_len, 1))?; let freqs = t.matmul(&inv_freq)?; Ok(Self { sin: freqs.sin()?.to_dtype(dtype)?, cos: freqs.cos()?.to_dtype(dtype)?, }) } fn apply_rotary_emb_qkv( &self, q: &Tensor, k: &Tensor, seqlen_offset: usize, ) -> Result<(Tensor, Tensor)> { let (_b_sz, _h, seq_len, _n_embd) = q.dims4()?; let cos = self.cos.narrow(0, seqlen_offset, seq_len)?; let sin = self.sin.narrow(0, seqlen_offset, seq_len)?; let q_embed = candle_nn::rotary_emb::rope_i(q, &cos, &sin)?; let k_embed = candle_nn::rotary_emb::rope_i(k, &cos, &sin)?; Ok((q_embed, k_embed)) } } #[derive(Debug, Clone)] #[allow(clippy::upper_case_acronyms)] struct MLP { gate_proj: Linear, up_proj: Linear, down_proj: Linear, act_fn: candle_nn::Activation, } impl MLP { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let hidden_sz = cfg.hidden_size; let intermediate_sz = cfg.intermediate_size; let bias = cfg.mlp_bias; let gate_proj = linear(hidden_sz, intermediate_sz, bias, vb.pp("gate_proj"))?; let up_proj = linear(hidden_sz, intermediate_sz, bias, vb.pp("up_proj"))?; let down_proj = linear(intermediate_sz, hidden_sz, bias, vb.pp("down_proj"))?; Ok(Self { gate_proj, up_proj, down_proj, act_fn: cfg.hidden_act, }) } } impl Module for MLP { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let lhs = xs.apply(&self.gate_proj)?.apply(&self.act_fn)?; let rhs = xs.apply(&self.up_proj)?; (lhs * rhs)?.apply(&self.down_proj) } } #[cfg(feature = "flash-attn")] fn flash_attn( q: &Tensor, k: &Tensor, v: &Tensor, softmax_scale: f32, causal: bool, ) -> Result<Tensor> { candle_flash_attn::flash_attn(q, k, v, softmax_scale, causal) } #[cfg(not(feature = "flash-attn"))] fn flash_attn(_: &Tensor, _: &Tensor, _: &Tensor, _: f32, _: bool) -> Result<Tensor> { unimplemented!("compile with '--features flash-attn'") } #[derive(Debug, Clone)] struct Attention { q_proj: Linear, k_proj: Linear, v_proj: Linear, o_proj: Linear, num_heads: usize, num_kv_heads: usize, num_kv_groups: usize, head_dim: usize, rotary_emb: Arc<RotaryEmbedding>, kv_cache: Option<(Tensor, Tensor)>, use_flash_attn: bool, } impl Attention { fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> { let hidden_sz = cfg.hidden_size; let num_heads = cfg.num_attention_heads; let num_kv_heads = cfg.num_key_value_heads; let num_kv_groups = num_heads / num_kv_heads; let head_dim = cfg.head_dim; let bias = cfg.attention_bias; let q_proj = linear(hidden_sz, num_heads * head_dim, bias, vb.pp("q_proj"))?; let k_proj = linear(hidden_sz, num_kv_heads * head_dim, bias, vb.pp("k_proj"))?; let v_proj = linear(hidden_sz, num_kv_heads * head_dim, bias, vb.pp("v_proj"))?; let o_proj = linear(num_heads * head_dim, hidden_sz, bias, vb.pp("o_proj"))?; Ok(Self { q_proj, k_proj, v_proj, o_proj, num_heads, num_kv_heads, num_kv_groups, head_dim, rotary_emb, kv_cache: None, use_flash_attn: cfg.use_flash_attn, }) } fn forward( &mut self, xs: &Tensor, attention_mask: Option<&Tensor>, seqlen_offset: usize, ) -> Result<Tensor> { let (b_sz, q_len, _) = xs.dims3()?; let query_states = self.q_proj.forward(xs)?; let key_states = self.k_proj.forward(xs)?; let value_states = self.v_proj.forward(xs)?; let query_states = query_states .reshape((b_sz, q_len, self.num_heads, self.head_dim))? .transpose(1, 2)? .contiguous()?; let key_states = key_states .reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))? .transpose(1, 2)? .contiguous()?; let value_states = value_states .reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))? .transpose(1, 2)? 
.contiguous()?; let (query_states, key_states) = self.rotary_emb .apply_rotary_emb_qkv(&query_states, &key_states, seqlen_offset)?; let (key_states, value_states) = match &self.kv_cache { None => (key_states, value_states), Some((prev_k, prev_v)) => { let key_states = Tensor::cat(&[prev_k, &key_states], 2)?; let value_states = Tensor::cat(&[prev_v, &value_states], 2)?; (key_states, value_states) } }; self.kv_cache = Some((key_states.clone(), value_states.clone())); let key_states = crate::utils::repeat_kv(key_states, self.num_kv_groups)?; let value_states = crate::utils::repeat_kv(value_states, self.num_kv_groups)?; let attn_output = if self.use_flash_attn { // flash-attn expects (b_sz, seq_len, nheads, head_dim) let q = query_states.transpose(1, 2)?; let k = key_states.transpose(1, 2)?; let v = value_states.transpose(1, 2)?; let softmax_scale = 1f32 / (self.head_dim as f32).sqrt(); flash_attn(&q, &k, &v, softmax_scale, q_len > 1)?.transpose(1, 2)? } else { let scale = 1f64 / f64::sqrt(self.head_dim as f64); let attn_weights = (query_states.matmul(&key_states.transpose(2, 3)?)? * scale)?; let attn_weights = match attention_mask { None => attn_weights, Some(mask) => attn_weights.broadcast_add(mask)?, }; let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?; attn_weights.matmul(&value_states)? }; attn_output .transpose(1, 2)? .reshape((b_sz, q_len, self.num_heads * self.head_dim))? .apply(&self.o_proj) } fn clear_kv_cache(&mut self) { self.kv_cache = None } } #[derive(Debug, Clone)] struct DecoderLayer { self_attn: Attention, mlp: MLP, input_layernorm: RmsNorm, post_attention_layernorm: RmsNorm, } impl DecoderLayer { fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> { let self_attn = Attention::new(rotary_emb, cfg, vb.pp("self_attn"))?; let mlp = MLP::new(cfg, vb.pp("mlp"))?; let input_layernorm = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("input_layernorm"))?; let post_attention_layernorm = RmsNorm::new( cfg.hidden_size, cfg.rms_norm_eps, vb.pp("post_attention_layernorm"), )?; Ok(Self { self_attn, mlp, input_layernorm, post_attention_layernorm, }) } fn forward( &mut self, xs: &Tensor, attention_mask: Option<&Tensor>, seqlen_offset: usize, ) -> Result<Tensor> { let residual = xs; let xs = self.input_layernorm.forward(xs)?; let xs = self.self_attn.forward(&xs, attention_mask, seqlen_offset)?; let xs = (xs + residual)?; let residual = &xs; let xs = xs.apply(&self.post_attention_layernorm)?.apply(&self.mlp)?; residual + xs } fn clear_kv_cache(&mut self) { self.self_attn.clear_kv_cache() } } #[derive(Debug, Clone)] pub struct Model { embed_tokens: candle_nn::Embedding, layers: Vec<DecoderLayer>, norm: RmsNorm, lm_head: Linear, device: Device, dtype: DType, } impl Model { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let vb_m = vb.pp("model"); let embed_tokens = candle_nn::embedding(cfg.vocab_size, cfg.hidden_size, vb_m.pp("embed_tokens"))?; let rotary_emb = Arc::new(RotaryEmbedding::new(vb.dtype(), cfg, vb_m.device())?); let mut layers = Vec::with_capacity(cfg.num_hidden_layers); let vb_l = vb_m.pp("layers"); for layer_idx in 0..cfg.num_hidden_layers { let layer = DecoderLayer::new(rotary_emb.clone(), cfg, vb_l.pp(layer_idx))?; layers.push(layer) } let norm = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb_m.pp("norm"))?; let lm_head = if cfg.tie_word_embeddings { Linear::from_weights(embed_tokens.embeddings().clone(), None) } else { linear(cfg.hidden_size, cfg.vocab_size, false, vb.pp("lm_head"))? 
}; Ok(Self { embed_tokens, layers, norm, lm_head, device: vb.device().clone(), dtype: vb.dtype(), }) } fn prepare_decoder_attention_mask( &self, tgt_len: usize, seqlen_offset: usize, ) -> Result<Tensor> { let mask: Vec<_> = (0..tgt_len) .flat_map(|i| (0..tgt_len).map(move |j| if i < j { f32::NEG_INFINITY } else { 0. })) .collect(); let mask = Tensor::from_slice(&mask, (tgt_len, tgt_len), &self.device)?; let mask = if seqlen_offset > 0 { let mask0 = Tensor::zeros((tgt_len, seqlen_offset), DType::F32, &self.device)?; Tensor::cat(&[&mask0, &mask], D::Minus1)? } else { mask }; mask.expand((1, 1, tgt_len, tgt_len + seqlen_offset))? .to_dtype(self.dtype) } pub fn embed_tokens(&self) -> &candle_nn::Embedding { &self.embed_tokens } pub fn forward(&mut self, input_ids: &Tensor, seqlen_offset: usize) -> Result<Tensor> { let (_b_size, seq_len) = input_ids.dims2()?; let attention_mask = if seq_len <= 1 { None } else { let mask = self.prepare_decoder_attention_mask(seq_len, seqlen_offset)?; Some(mask) }; let mut xs = self.embed_tokens.forward(input_ids)?; for layer in self.layers.iter_mut() { xs = layer.forward(&xs, attention_mask.as_ref(), seqlen_offset)? } xs.narrow(1, seq_len - 1, 1)? .apply(&self.norm)? .apply(&self.lm_head) } pub fn clear_kv_cache(&mut self) { for layer in self.layers.iter_mut() { layer.clear_kv_cache() } } }
candle/candle-transformers/src/models/helium.rs/0
{ "file_path": "candle/candle-transformers/src/models/helium.rs", "repo_id": "candle", "token_count": 6786 }
// Copyright (c) Kyutai, all rights reserved. // This source code is licensed under the license found in the // LICENSE file in the root directory of this source tree. use candle::{streaming, Module, Result, StreamTensor, StreamingModule, Tensor}; use candle_nn::VarBuilder; use super::conv::{StreamableConv1d, StreamableConvTranspose1d}; #[derive(Debug, Clone)] pub struct Config { pub dimension: usize, pub channels: usize, pub causal: bool, pub n_filters: usize, pub n_residual_layers: usize, pub ratios: Vec<usize>, pub activation: candle_nn::Activation, pub norm: super::conv::Norm, pub kernel_size: usize, pub residual_kernel_size: usize, pub last_kernel_size: usize, pub dilation_base: usize, pub pad_mode: super::conv::PadMode, pub true_skip: bool, pub compress: usize, pub lstm: usize, pub disable_norm_outer_blocks: usize, pub final_activation: Option<candle_nn::Activation>, } #[derive(Debug, Clone)] pub struct SeaNetResnetBlock { block: Vec<StreamableConv1d>, shortcut: Option<StreamableConv1d>, activation: candle_nn::Activation, skip_op: candle::StreamingBinOp, span: tracing::Span, } impl SeaNetResnetBlock { #[allow(clippy::too_many_arguments)] pub fn new( dim: usize, k_sizes_and_dilations: &[(usize, usize)], activation: candle_nn::Activation, norm: Option<super::conv::Norm>, causal: bool, pad_mode: super::conv::PadMode, compress: usize, true_skip: bool, vb: VarBuilder, ) -> Result<Self> { let mut block = Vec::with_capacity(k_sizes_and_dilations.len()); let hidden = dim / compress; let vb_b = vb.pp("block"); for (i, (k_size, dilation)) in k_sizes_and_dilations.iter().enumerate() { let in_c = if i == 0 { dim } else { hidden }; let out_c = if i == k_sizes_and_dilations.len() - 1 { dim } else { hidden }; let c = StreamableConv1d::new( in_c, out_c, /* k_size */ *k_size, /* stride */ 1, /* dilation */ *dilation, /* groups */ 1, /* bias */ true, /* causal */ causal, /* norm */ norm, /* pad_mode */ pad_mode, vb_b.pp(2 * i + 1), )?; block.push(c) } let shortcut = if true_skip { None } else { let c = StreamableConv1d::new( dim, dim, /* k_size */ 1, /* stride */ 1, /* dilation */ 1, /* groups */ 1, /* bias */ true, /* causal */ causal, /* norm */ norm, /* pad_mode */ pad_mode, vb.pp("shortcut"), )?; Some(c) }; Ok(Self { block, shortcut, activation, skip_op: streaming::StreamingBinOp::new(streaming::BinOp::Add, candle::D::Minus1), span: tracing::span!(tracing::Level::TRACE, "sea-resnet"), }) } } impl Module for SeaNetResnetBlock { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let mut ys = xs.clone(); for block in self.block.iter() { ys = ys.apply(&self.activation)?.apply(block)?; } match self.shortcut.as_ref() { None => ys + xs, Some(shortcut) => ys + xs.apply(shortcut), } } } impl StreamingModule for SeaNetResnetBlock { fn reset_state(&mut self) { for block in self.block.iter_mut() { block.reset_state() } if let Some(shortcut) = self.shortcut.as_mut() { shortcut.reset_state() } } fn step(&mut self, xs: &StreamTensor) -> Result<StreamTensor> { let _enter = self.span.enter(); let mut ys = xs.clone(); for block in self.block.iter_mut() { ys = block.step(&ys.apply(&self.activation)?)?; } match self.shortcut.as_ref() { None => self.skip_op.step(&ys, xs), Some(shortcut) => self.skip_op.step(&ys, &xs.apply(shortcut)?), } } } #[derive(Debug, Clone)] struct EncoderLayer { residuals: Vec<SeaNetResnetBlock>, downsample: StreamableConv1d, } #[derive(Debug, Clone)] pub struct SeaNetEncoder { init_conv1d: StreamableConv1d, activation: candle_nn::Activation, layers: 
Vec<EncoderLayer>, final_conv1d: StreamableConv1d, span: tracing::Span, } impl SeaNetEncoder { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { if cfg.lstm > 0 { candle::bail!("seanet lstm is not supported") } let n_blocks = 2 + cfg.ratios.len(); let mut mult = 1usize; let init_norm = if cfg.disable_norm_outer_blocks >= 1 { None } else { Some(cfg.norm) }; let mut layer_idx = 0; let vb = vb.pp("layers"); let init_conv1d = StreamableConv1d::new( cfg.channels, mult * cfg.n_filters, cfg.kernel_size, /* stride */ 1, /* dilation */ 1, /* groups */ 1, /* bias */ true, /* causal */ cfg.causal, /* norm */ init_norm, /* pad_mode */ cfg.pad_mode, vb.pp(layer_idx), )?; layer_idx += 1; let mut layers = Vec::with_capacity(cfg.ratios.len()); for (i, &ratio) in cfg.ratios.iter().rev().enumerate() { let norm = if cfg.disable_norm_outer_blocks >= i + 2 { None } else { Some(cfg.norm) }; let mut residuals = Vec::with_capacity(cfg.n_residual_layers); for j in 0..cfg.n_residual_layers { let resnet_block = SeaNetResnetBlock::new( mult * cfg.n_filters, &[ (cfg.residual_kernel_size, cfg.dilation_base.pow(j as u32)), (1, 1), ], cfg.activation, norm, cfg.causal, cfg.pad_mode, cfg.compress, cfg.true_skip, vb.pp(layer_idx), )?; residuals.push(resnet_block); layer_idx += 1; } let downsample = StreamableConv1d::new( mult * cfg.n_filters, mult * cfg.n_filters * 2, /* k_size */ ratio * 2, /* stride */ ratio, /* dilation */ 1, /* groups */ 1, /* bias */ true, /* causal */ true, /* norm */ norm, /* pad_mode */ cfg.pad_mode, vb.pp(layer_idx + 1), )?; layer_idx += 2; let layer = EncoderLayer { downsample, residuals, }; layers.push(layer); mult *= 2 } let final_norm = if cfg.disable_norm_outer_blocks >= n_blocks { None } else { Some(cfg.norm) }; let final_conv1d = StreamableConv1d::new( mult * cfg.n_filters, cfg.dimension, cfg.last_kernel_size, /* stride */ 1, /* dilation */ 1, /* groups */ 1, /* bias */ true, /* causal */ cfg.causal, /* norm */ final_norm, /* pad_mode */ cfg.pad_mode, vb.pp(layer_idx + 1), )?; Ok(Self { init_conv1d, activation: cfg.activation, layers, final_conv1d, span: tracing::span!(tracing::Level::TRACE, "sea-encoder"), }) } } impl Module for SeaNetEncoder { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let mut xs = xs.apply(&self.init_conv1d)?; for layer in self.layers.iter() { for residual in layer.residuals.iter() { xs = xs.apply(residual)? } xs = xs.apply(&self.activation)?.apply(&layer.downsample)?; } xs.apply(&self.activation)?.apply(&self.final_conv1d) } } impl StreamingModule for SeaNetEncoder { fn reset_state(&mut self) { self.init_conv1d.reset_state(); self.layers.iter_mut().for_each(|v| { v.residuals.iter_mut().for_each(|v| v.reset_state()); v.downsample.reset_state() }); self.final_conv1d.reset_state(); } fn step(&mut self, xs: &StreamTensor) -> Result<StreamTensor> { let _enter = self.span.enter(); let mut xs = self.init_conv1d.step(xs)?; for layer in self.layers.iter_mut() { for residual in layer.residuals.iter_mut() { xs = residual.step(&xs)?; } xs = layer.downsample.step(&xs.apply(&self.activation)?)?; } self.final_conv1d.step(&xs.apply(&self.activation)?) 
} } #[derive(Debug, Clone)] struct DecoderLayer { upsample: StreamableConvTranspose1d, residuals: Vec<SeaNetResnetBlock>, } #[derive(Debug, Clone)] pub struct SeaNetDecoder { init_conv1d: StreamableConv1d, activation: candle_nn::Activation, layers: Vec<DecoderLayer>, final_conv1d: StreamableConv1d, final_activation: Option<candle_nn::Activation>, span: tracing::Span, } impl SeaNetDecoder { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { if cfg.lstm > 0 { candle::bail!("seanet lstm is not supported") } let n_blocks = 2 + cfg.ratios.len(); let mut mult = 1 << cfg.ratios.len(); let init_norm = if cfg.disable_norm_outer_blocks == n_blocks { None } else { Some(cfg.norm) }; let mut layer_idx = 0; let vb = vb.pp("layers"); let init_conv1d = StreamableConv1d::new( cfg.dimension, mult * cfg.n_filters, cfg.kernel_size, /* stride */ 1, /* dilation */ 1, /* groups */ 1, /* bias */ true, /* causal */ cfg.causal, /* norm */ init_norm, /* pad_mode */ cfg.pad_mode, vb.pp(layer_idx), )?; layer_idx += 1; let mut layers = Vec::with_capacity(cfg.ratios.len()); for (i, &ratio) in cfg.ratios.iter().enumerate() { let norm = if cfg.disable_norm_outer_blocks + i + 1 >= n_blocks { None } else { Some(cfg.norm) }; let upsample = StreamableConvTranspose1d::new( mult * cfg.n_filters, mult * cfg.n_filters / 2, /* k_size */ ratio * 2, /* stride */ ratio, /* groups */ 1, /* bias */ true, /* causal */ true, /* norm */ norm, vb.pp(layer_idx + 1), )?; layer_idx += 2; let mut residuals = Vec::with_capacity(cfg.n_residual_layers); for j in 0..cfg.n_residual_layers { let resnet_block = SeaNetResnetBlock::new( mult * cfg.n_filters / 2, &[ (cfg.residual_kernel_size, cfg.dilation_base.pow(j as u32)), (1, 1), ], cfg.activation, norm, cfg.causal, cfg.pad_mode, cfg.compress, cfg.true_skip, vb.pp(layer_idx), )?; residuals.push(resnet_block); layer_idx += 1; } let layer = DecoderLayer { upsample, residuals, }; layers.push(layer); mult /= 2 } let final_norm = if cfg.disable_norm_outer_blocks >= 1 { None } else { Some(cfg.norm) }; let final_conv1d = StreamableConv1d::new( cfg.n_filters, cfg.channels, cfg.last_kernel_size, /* stride */ 1, /* dilation */ 1, /* groups */ 1, /* bias */ true, /* causal */ cfg.causal, /* norm */ final_norm, /* pad_mode */ cfg.pad_mode, vb.pp(layer_idx + 1), )?; Ok(Self { init_conv1d, activation: cfg.activation, layers, final_conv1d, final_activation: cfg.final_activation, span: tracing::span!(tracing::Level::TRACE, "sea-decoder"), }) } } impl Module for SeaNetDecoder { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let mut xs = xs.apply(&self.init_conv1d)?; for layer in self.layers.iter() { xs = xs.apply(&self.activation)?.apply(&layer.upsample)?; for residual in layer.residuals.iter() { xs = xs.apply(residual)? 
} } let xs = xs.apply(&self.activation)?.apply(&self.final_conv1d)?; let xs = match self.final_activation.as_ref() { None => xs, Some(act) => xs.apply(act)?, }; Ok(xs) } } impl StreamingModule for SeaNetDecoder { fn reset_state(&mut self) { self.init_conv1d.reset_state(); self.layers.iter_mut().for_each(|v| { v.residuals.iter_mut().for_each(|v| v.reset_state()); v.upsample.reset_state() }); self.final_conv1d.reset_state(); } fn step(&mut self, xs: &StreamTensor) -> Result<StreamTensor> { let _enter = self.span.enter(); let mut xs = self.init_conv1d.step(xs)?; for layer in self.layers.iter_mut() { xs = layer.upsample.step(&xs.apply(&self.activation)?)?; for residual in layer.residuals.iter_mut() { xs = residual.step(&xs)?; } } let xs = self.final_conv1d.step(&xs.apply(&self.activation)?)?; let xs = match self.final_activation.as_ref() { None => xs, Some(act) => xs.apply(act)?, }; Ok(xs) } }
candle/candle-transformers/src/models/mimi/seanet.rs/0
{ "file_path": "candle/candle-transformers/src/models/mimi/seanet.rs", "repo_id": "candle", "token_count": 8092 }
//! Module implementing the MPT (Multi-Purpose Transformer) model //! //! References: //! - [MPT Model used by replit-code-v1_5-3b](https://huggingface.co/replit/replit-code-v1_5-3b/blob/main/modeling_mpt.py) //! - [Configuration](https://huggingface.co/replit/replit-code-v1_5-3b/blob/main/configuration_mpt.py) //! //! The model uses grouped query attention and alibi positional embeddings. use crate::models::with_tracing::{linear_no_bias, Embedding, Linear}; /// MPT model used by replit-code-v1_5-3b /// https://huggingface.co/replit/replit-code-v1_5-3b/blob/main/modeling_mpt.py use candle::{DType, Device, IndexOp, Module, Result, Tensor, D}; use candle_nn::{layer_norm, LayerNorm, VarBuilder}; // https://huggingface.co/replit/replit-code-v1_5-3b/blob/main/configuration_mpt.py #[derive(Debug, Clone, PartialEq)] pub struct Config { pub(crate) d_model: usize, pub(crate) n_heads: usize, pub(crate) n_layers: usize, pub(crate) expansion_ratio: usize, pub(crate) max_seq_len: usize, pub(crate) vocab_size: usize, pub(crate) kv_n_heads: usize, pub(crate) attn_prefix_lm: bool, pub(crate) attn_alibi: bool, pub(crate) attn_alibi_bias_max: usize, } impl Config { pub fn replit_code_v1_5_3b() -> Self { Self { d_model: 3072, n_heads: 24, n_layers: 32, expansion_ratio: 4, max_seq_len: 4096, vocab_size: 32768, kv_n_heads: 8, attn_prefix_lm: false, attn_alibi: true, attn_alibi_bias_max: 8, } } pub fn is_causal(&self) -> bool { !self.attn_prefix_lm } } #[derive(Debug, Clone)] struct GroupedQueryAttention { wqkv: Linear, out_proj: Linear, kv_cache: Option<(Tensor, Tensor)>, softmax_scale: f64, head_dim: usize, d_model: usize, n_heads: usize, kv_n_heads: usize, attn_bias: Tensor, span: tracing::Span, } impl GroupedQueryAttention { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let head_dim = cfg.d_model / cfg.n_heads; let wqkv_size = cfg.d_model + 2 * cfg.kv_n_heads * head_dim; let wqkv = linear_no_bias(cfg.d_model, wqkv_size, vb.pp("Wqkv"))?; let softmax_scale = 1f64 / (head_dim as f64).sqrt(); let out_proj = linear_no_bias(cfg.d_model, cfg.d_model, vb.pp("out_proj"))?; let attn_bias = build_alibi_bias(cfg)?.to_device(vb.device())?; Ok(Self { wqkv, out_proj, kv_cache: None, softmax_scale, head_dim, d_model: cfg.d_model, n_heads: cfg.n_heads, kv_n_heads: cfg.kv_n_heads, attn_bias, span: tracing::span!(tracing::Level::TRACE, "gqa"), }) } fn forward(&mut self, xs: &Tensor, mask: Option<&Tensor>) -> Result<Tensor> { let _enter = self.span.enter(); let (b_size, seq_len, _n_embd) = xs.dims3()?; let qkv = self.wqkv.forward(xs)?; let query = qkv.narrow(2, 0, self.d_model)?; let kv_size = self.kv_n_heads * self.head_dim; let key = qkv.narrow(2, self.d_model, kv_size)?; let value = qkv.narrow(2, self.d_model + kv_size, kv_size)?; // scaled_multihead_dot_product_attention let query = query .reshape((b_size, seq_len, self.n_heads, ()))? .transpose(1, 2)?; // b,h,s,d let key = key .reshape((b_size, seq_len, self.kv_n_heads, ()))? .permute((0, 2, 3, 1))?; // b,h,d,s let value = value .reshape((b_size, seq_len, self.kv_n_heads, ()))? 
.transpose(1, 2)?; // b,h,s,d let (key, value) = match &self.kv_cache { None => (key, value), Some((prev_k, prev_v)) => { let k = Tensor::cat(&[prev_k, &key], 3)?; let v = Tensor::cat(&[prev_v, &value], 2)?; (k, v) } }; self.kv_cache = Some((key.clone(), value.clone())); let query = query.contiguous()?; let key = crate::utils::repeat_kv(key, self.n_heads / self.kv_n_heads)?.contiguous()?; let value = crate::utils::repeat_kv(value, self.n_heads / self.kv_n_heads)?.contiguous()?; let attn_weights = (query.matmul(&key)? * self.softmax_scale)?; let attn_bias = { let s_q = query.dim(D::Minus2)?; let s_k = key.dim(D::Minus1)?; let (_, _, a_q, a_k) = self.attn_bias.dims4()?; let start_q = a_q.saturating_sub(s_q); let start_k = a_k.saturating_sub(s_k); self.attn_bias.i((.., .., start_q.., start_k..))? }; let attn_weights = attn_weights.broadcast_add(&attn_bias)?; let attn_weights = match mask { None => attn_weights, Some(mask) => masked_fill( &attn_weights, &mask.broadcast_as(attn_weights.shape())?, f32::NEG_INFINITY, )?, }; let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?; let attn_output = attn_weights .matmul(&value)? .transpose(1, 2)? .flatten_from(D::Minus2)?; let out = attn_output.apply(&self.out_proj)?; Ok(out) } } #[derive(Debug, Clone)] struct Ffn { up_proj: Linear, down_proj: Linear, } impl Ffn { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let hidden = cfg.d_model * cfg.expansion_ratio; let up_proj = linear_no_bias(cfg.d_model, hidden, vb.pp("up_proj"))?; let down_proj = linear_no_bias(hidden, cfg.d_model, vb.pp("down_proj"))?; Ok(Self { up_proj, down_proj }) } } impl Module for Ffn { fn forward(&self, xs: &Tensor) -> Result<Tensor> { xs.apply(&self.up_proj)?.gelu_erf()?.apply(&self.down_proj) } } #[derive(Debug, Clone)] struct MPTBlock { norm1: LayerNorm, // Do we need the low-precision variant? attn: GroupedQueryAttention, norm2: LayerNorm, ffn: Ffn, } impl MPTBlock { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let ln_cfg = candle_nn::LayerNormConfig { affine: false, ..Default::default() }; let norm1 = layer_norm(cfg.d_model, ln_cfg, vb.pp("norm_1"))?; let norm2 = layer_norm(cfg.d_model, ln_cfg, vb.pp("norm_2"))?; let attn = GroupedQueryAttention::new(cfg, vb.pp("attn"))?; let ffn = Ffn::new(cfg, vb.pp("ffn"))?; Ok(Self { norm1, attn, norm2, ffn, }) } fn forward(&mut self, xs: &Tensor, mask: Option<&Tensor>) -> Result<Tensor> { let residual = xs; let xs = xs.apply(&self.norm1)?; let xs = self.attn.forward(&xs, mask)?; let xs = (xs + residual)?; let residual = &xs; let xs = xs.apply(&self.norm2)?.apply(&self.ffn)?; xs + residual } } pub(crate) fn build_alibi_bias(cfg: &Config) -> Result<Tensor> { let full = !cfg.is_causal(); let seq_len = cfg.max_seq_len; let alibi_bias = Tensor::arange(1 - seq_len as i64, 1, &Device::Cpu)?; let alibi_bias = if full { let a1 = alibi_bias.reshape((1, 1, 1, seq_len))?; let a2 = alibi_bias.reshape((1, 1, seq_len, 1))?; a1.broadcast_sub(&a2)?.abs()?.neg()? } else { alibi_bias.reshape((1, 1, 1, seq_len))? 
}; let mut n_heads2 = 1; while n_heads2 < cfg.n_heads { n_heads2 *= 2 } let slopes = (1..=n_heads2) .map(|v| 1f32 / 2f32.powf((v * cfg.attn_alibi_bias_max) as f32 / n_heads2 as f32)) .collect::<Vec<_>>(); let slopes = if n_heads2 == cfg.n_heads { slopes } else { slopes .iter() .skip(1) .step_by(2) .chain(slopes.iter().step_by(2)) .take(cfg.n_heads) .cloned() .collect::<Vec<f32>>() }; let slopes = Tensor::new(slopes, &Device::Cpu)?.reshape((1, (), 1, 1))?; alibi_bias.to_dtype(DType::F32)?.broadcast_mul(&slopes) } #[derive(Debug, Clone)] pub struct Model { wte: Embedding, blocks: Vec<MPTBlock>, norm_f: LayerNorm, } impl Model { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let wte = Embedding::new(cfg.vocab_size, cfg.d_model, vb.pp("wte"))?; let vb_b = vb.pp("blocks"); let mut blocks = Vec::with_capacity(cfg.n_layers); for i in 0..cfg.n_layers { let block = MPTBlock::new(cfg, vb_b.pp(i))?; blocks.push(block) } let ln_cfg = candle_nn::LayerNormConfig { affine: false, ..Default::default() }; let norm_f = candle_nn::layer_norm(cfg.d_model, ln_cfg, vb.pp("norm_f"))?; Ok(Self { wte, blocks, norm_f, }) } pub fn forward(&mut self, xs: &Tensor) -> Result<Tensor> { let (_b_size, seq_len) = xs.dims2()?; let mut xs = xs.apply(&self.wte)?; let mask = if seq_len <= 1 { None } else { Some(get_mask(seq_len, xs.device())?) }; for block in self.blocks.iter_mut() { xs = block.forward(&xs, mask.as_ref())?; } let xs = xs.apply(&self.norm_f)?; let logits = xs .narrow(1, seq_len - 1, 1)? .squeeze(1)? .matmul(&self.wte.embeddings().t()?)? .squeeze(1)?; Ok(logits) } } pub(crate) fn get_mask(size: usize, device: &Device) -> Result<Tensor> { let mask: Vec<_> = (0..size) .flat_map(|i| (0..size).map(move |j| u8::from(j > i))) .collect(); Tensor::from_slice(&mask, (size, size), device) } pub(crate) fn masked_fill(on_false: &Tensor, mask: &Tensor, on_true: f32) -> Result<Tensor> { let shape = mask.shape(); let on_true = Tensor::new(on_true, on_false.device())?.broadcast_as(shape.dims())?; let m = mask.where_cond(&on_true, on_false)?; Ok(m) }
candle/candle-transformers/src/models/mpt.rs/0
{ "file_path": "candle/candle-transformers/src/models/mpt.rs", "repo_id": "candle", "token_count": 5366 }
//! Qwen2 model implementation with quantization support. //! //! Qwen2 is a large language model from Alibaba optimized for efficiency. //! This implementation provides quantization for reduced memory and compute. //! //! Key characteristics: //! - Streaming decode support //! - Grouped query attention (GQA) //! - RMSNorm for layer normalization //! - Rotary positional embeddings (RoPE) //! - Support for 8-bit quantization //! //! References: //! - πŸ€— [Qwen2 Model](https://huggingface.co/Qwen/Qwen2-7B) //! use crate::models::with_tracing::{linear, linear_no_bias, Linear, RmsNorm}; use candle::{DType, Device, IndexOp, Module, Result, Tensor, D}; use candle_nn::{Activation, VarBuilder}; use std::sync::Arc; #[derive(Debug, Clone, PartialEq, serde::Deserialize)] pub struct Config { pub vocab_size: usize, pub hidden_size: usize, pub intermediate_size: usize, pub num_hidden_layers: usize, pub num_attention_heads: usize, pub num_key_value_heads: usize, pub max_position_embeddings: usize, pub sliding_window: usize, pub max_window_layers: usize, pub tie_word_embeddings: bool, pub rope_theta: f64, pub rms_norm_eps: f64, pub use_sliding_window: bool, pub hidden_act: Activation, } #[derive(Debug, Clone)] struct RotaryEmbedding { sin: Tensor, cos: Tensor, } impl RotaryEmbedding { fn new(dtype: DType, cfg: &Config, dev: &Device) -> Result<Self> { let dim = cfg.hidden_size / cfg.num_attention_heads; let max_seq_len = cfg.max_position_embeddings; let inv_freq: Vec<_> = (0..dim) .step_by(2) .map(|i| 1f32 / cfg.rope_theta.powf(i as f64 / dim as f64) as f32) .collect(); let inv_freq_len = inv_freq.len(); let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(dtype)?; let t = Tensor::arange(0u32, max_seq_len as u32, dev)? .to_dtype(dtype)? .reshape((max_seq_len, 1))?; let freqs = t.matmul(&inv_freq)?; Ok(Self { sin: freqs.sin()?, cos: freqs.cos()?, }) } fn apply_rotary_emb_qkv( &self, q: &Tensor, k: &Tensor, seqlen_offset: usize, ) -> Result<(Tensor, Tensor)> { let (_b_sz, _h, seq_len, _n_embd) = q.dims4()?; let cos = self.cos.narrow(0, seqlen_offset, seq_len)?; let sin = self.sin.narrow(0, seqlen_offset, seq_len)?; let q_embed = candle_nn::rotary_emb::rope(&q.contiguous()?, &cos, &sin)?; let k_embed = candle_nn::rotary_emb::rope(&k.contiguous()?, &cos, &sin)?; Ok((q_embed, k_embed)) } } #[derive(Debug, Clone)] #[allow(clippy::upper_case_acronyms)] struct MLP { gate_proj: Linear, up_proj: Linear, down_proj: Linear, act_fn: Activation, } impl MLP { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let hidden_sz = cfg.hidden_size; let intermediate_sz = cfg.intermediate_size; let gate_proj = linear_no_bias(hidden_sz, intermediate_sz, vb.pp("gate_proj"))?; let up_proj = linear_no_bias(hidden_sz, intermediate_sz, vb.pp("up_proj"))?; let down_proj = linear_no_bias(intermediate_sz, hidden_sz, vb.pp("down_proj"))?; Ok(Self { gate_proj, up_proj, down_proj, act_fn: cfg.hidden_act, }) } } impl Module for MLP { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let lhs = xs.apply(&self.gate_proj)?.apply(&self.act_fn)?; let rhs = xs.apply(&self.up_proj)?; (lhs * rhs)?.apply(&self.down_proj) } } #[derive(Debug, Clone)] struct Attention { q_proj: Linear, k_proj: Linear, v_proj: Linear, o_proj: Linear, num_heads: usize, num_kv_heads: usize, num_kv_groups: usize, head_dim: usize, hidden_size: usize, rotary_emb: Arc<RotaryEmbedding>, kv_cache: Option<(Tensor, Tensor)>, } impl Attention { fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> { let hidden_sz = 
cfg.hidden_size; let num_heads = cfg.num_attention_heads; let num_kv_heads = cfg.num_key_value_heads; let num_kv_groups = num_heads / num_kv_heads; let head_dim = hidden_sz / num_heads; let q_proj = linear(hidden_sz, num_heads * head_dim, vb.pp("q_proj"))?; let k_proj = linear(hidden_sz, num_kv_heads * head_dim, vb.pp("k_proj"))?; let v_proj = linear(hidden_sz, num_kv_heads * head_dim, vb.pp("v_proj"))?; let o_proj = linear_no_bias(num_heads * head_dim, hidden_sz, vb.pp("o_proj"))?; Ok(Self { q_proj, k_proj, v_proj, o_proj, num_heads, num_kv_heads, num_kv_groups, head_dim, hidden_size: hidden_sz, rotary_emb, kv_cache: None, }) } fn forward( &mut self, xs: &Tensor, attention_mask: Option<&Tensor>, seqlen_offset: usize, ) -> Result<Tensor> { let (b_sz, q_len, _) = xs.dims3()?; let query_states = self.q_proj.forward(xs)?; let key_states = self.k_proj.forward(xs)?; let value_states = self.v_proj.forward(xs)?; let query_states = query_states .reshape((b_sz, q_len, self.num_heads, self.head_dim))? .transpose(1, 2)?; let key_states = key_states .reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))? .transpose(1, 2)?; let value_states = value_states .reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))? .transpose(1, 2)?; let (query_states, key_states) = self.rotary_emb .apply_rotary_emb_qkv(&query_states, &key_states, seqlen_offset)?; let (key_states, value_states) = match &self.kv_cache { None => (key_states, value_states), Some((prev_k, prev_v)) => { let key_states = Tensor::cat(&[prev_k, &key_states], 2)?; let value_states = Tensor::cat(&[prev_v, &value_states], 2)?; (key_states, value_states) } }; self.kv_cache = Some((key_states.clone(), value_states.clone())); let key_states = crate::utils::repeat_kv(key_states, self.num_kv_groups)?.contiguous()?; let value_states = crate::utils::repeat_kv(value_states, self.num_kv_groups)?.contiguous()?; let attn_output = { let scale = 1f64 / f64::sqrt(self.head_dim as f64); let attn_weights = (query_states.matmul(&key_states.transpose(2, 3)?)? * scale)?; let attn_weights = match attention_mask { None => attn_weights, Some(mask) => attn_weights.broadcast_add(mask)?, }; let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?; attn_weights.matmul(&value_states)? }; attn_output .transpose(1, 2)? .reshape((b_sz, q_len, self.hidden_size))? 
.apply(&self.o_proj) } fn clear_kv_cache(&mut self) { self.kv_cache = None } } #[derive(Debug, Clone)] struct DecoderLayer { self_attn: Attention, mlp: MLP, input_layernorm: RmsNorm, post_attention_layernorm: RmsNorm, } impl DecoderLayer { fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> { let self_attn = Attention::new(rotary_emb, cfg, vb.pp("self_attn"))?; let mlp = MLP::new(cfg, vb.pp("mlp"))?; let input_layernorm = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("input_layernorm"))?; let post_attention_layernorm = RmsNorm::new( cfg.hidden_size, cfg.rms_norm_eps, vb.pp("post_attention_layernorm"), )?; Ok(Self { self_attn, mlp, input_layernorm, post_attention_layernorm, }) } fn forward( &mut self, xs: &Tensor, attention_mask: Option<&Tensor>, seqlen_offset: usize, ) -> Result<Tensor> { let residual = xs; let xs = self.input_layernorm.forward(xs)?; let xs = self.self_attn.forward(&xs, attention_mask, seqlen_offset)?; let xs = (xs + residual)?; let residual = &xs; let xs = xs.apply(&self.post_attention_layernorm)?.apply(&self.mlp)?; residual + xs } fn clear_kv_cache(&mut self) { self.self_attn.clear_kv_cache() } } #[derive(Debug, Clone)] pub struct Model { embed_tokens: candle_nn::Embedding, layers: Vec<DecoderLayer>, norm: RmsNorm, sliding_window: usize, device: Device, dtype: DType, } impl Model { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let vb_m = vb.pp("model"); let embed_tokens = candle_nn::embedding(cfg.vocab_size, cfg.hidden_size, vb_m.pp("embed_tokens"))?; let rotary_emb = Arc::new(RotaryEmbedding::new(vb.dtype(), cfg, vb_m.device())?); let mut layers = Vec::with_capacity(cfg.num_hidden_layers); let vb_l = vb_m.pp("layers"); for layer_idx in 0..cfg.num_hidden_layers { let layer = DecoderLayer::new(rotary_emb.clone(), cfg, vb_l.pp(layer_idx))?; layers.push(layer) } let norm = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb_m.pp("norm"))?; Ok(Self { embed_tokens, layers, norm, sliding_window: cfg.sliding_window, device: vb.device().clone(), dtype: vb.dtype(), }) } fn prepare_causal_attention_mask( &self, b_size: usize, tgt_len: usize, seqlen_offset: usize, ) -> Result<Tensor> { // Sliding window mask? let mask: Vec<_> = (0..tgt_len) .flat_map(|i| { (0..tgt_len).map(move |j| { if i < j || j + self.sliding_window < i { f32::NEG_INFINITY } else { 0. } }) }) .collect(); let mask = Tensor::from_slice(&mask, (tgt_len, tgt_len), &self.device)?; let mask = if seqlen_offset > 0 { let mask0 = Tensor::zeros((tgt_len, seqlen_offset), self.dtype, &self.device)?; Tensor::cat(&[&mask0, &mask], D::Minus1)? } else { mask }; mask.expand((b_size, 1, tgt_len, tgt_len + seqlen_offset))? .to_dtype(self.dtype) } fn prepare_attention_mask(&self, attn_mask: &Tensor) -> Result<Tensor> { let (b_sz, sql_len) = attn_mask.dims2()?; let mut mask: Vec<Tensor> = vec![]; for b in 0..b_sz { mask.push(attn_mask.i((b, ..))?.expand((1, 1, sql_len, sql_len))?); } let mask = Tensor::cat(&mask, 0)?; let on_true = mask.zeros_like()?.to_dtype(self.dtype)?; let on_false = Tensor::new(f32::NEG_INFINITY, &self.device)? .broadcast_as(mask.shape())? 
.to_dtype(self.dtype)?; mask.where_cond(&on_true, &on_false) } pub fn forward( &mut self, input_ids: &Tensor, seqlen_offset: usize, attn_mask: Option<&Tensor>, ) -> Result<Tensor> { let (b_size, seq_len) = input_ids.dims2()?; let attention_mask: Option<Tensor> = match attn_mask { Some(mask) => Some(self.prepare_attention_mask(mask)?), None => { if seq_len <= 1 { None } else { Some(self.prepare_causal_attention_mask(b_size, seq_len, seqlen_offset)?) } } }; let mut xs = self.embed_tokens.forward(input_ids)?; for layer in self.layers.iter_mut() { xs = layer.forward(&xs, attention_mask.as_ref(), seqlen_offset)? } xs.apply(&self.norm) } pub fn clear_kv_cache(&mut self) { for layer in self.layers.iter_mut() { layer.clear_kv_cache() } } } #[derive(Debug, Clone)] pub struct ModelForCausalLM { base_model: Model, lm_head: Linear, } impl ModelForCausalLM { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let base_model = Model::new(cfg, vb.clone())?; let lm_head = if vb.contains_tensor("lm_head.weight") { linear_no_bias(cfg.hidden_size, cfg.vocab_size, vb.pp("lm_head"))? } else { Linear::from_weights(base_model.embed_tokens.embeddings().clone(), None) }; Ok(Self { base_model, lm_head, }) } pub fn forward(&mut self, input_ids: &Tensor, seqlen_offset: usize) -> Result<Tensor> { let (_b_size, seq_len) = input_ids.dims2()?; self.base_model .forward(input_ids, seqlen_offset, None)? .narrow(1, seq_len - 1, 1)? .apply(&self.lm_head) } pub fn clear_kv_cache(&mut self) { self.base_model.clear_kv_cache() } }
candle/candle-transformers/src/models/qwen2.rs/0
{ "file_path": "candle/candle-transformers/src/models/qwen2.rs", "repo_id": "candle", "token_count": 6864 }
//! Attention Based Building Blocks use candle::{DType, IndexOp, Result, Tensor, D}; use candle_nn as nn; use candle_nn::Module; #[derive(Debug)] struct GeGlu { proj: nn::Linear, span: tracing::Span, } impl GeGlu { fn new(vs: nn::VarBuilder, dim_in: usize, dim_out: usize) -> Result<Self> { let proj = nn::linear(dim_in, dim_out * 2, vs.pp("proj"))?; let span = tracing::span!(tracing::Level::TRACE, "geglu"); Ok(Self { proj, span }) } } impl Module for GeGlu { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let hidden_states_and_gate = self.proj.forward(xs)?.chunk(2, D::Minus1)?; &hidden_states_and_gate[0] * hidden_states_and_gate[1].gelu()? } } /// A feed-forward layer. #[derive(Debug)] struct FeedForward { project_in: GeGlu, linear: nn::Linear, span: tracing::Span, } impl FeedForward { // The glu parameter in the python code is unused? // https://github.com/huggingface/diffusers/blob/d3d22ce5a894becb951eec03e663951b28d45135/src/diffusers/models/attention.py#L347 /// Creates a new feed-forward layer based on some given input dimension, some /// output dimension, and a multiplier to be used for the intermediary layer. fn new(vs: nn::VarBuilder, dim: usize, dim_out: Option<usize>, mult: usize) -> Result<Self> { let inner_dim = dim * mult; let dim_out = dim_out.unwrap_or(dim); let vs = vs.pp("net"); let project_in = GeGlu::new(vs.pp("0"), dim, inner_dim)?; let linear = nn::linear(inner_dim, dim_out, vs.pp("2"))?; let span = tracing::span!(tracing::Level::TRACE, "ff"); Ok(Self { project_in, linear, span, }) } } impl Module for FeedForward { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let xs = self.project_in.forward(xs)?; self.linear.forward(&xs) } } #[cfg(feature = "flash-attn")] fn flash_attn( q: &Tensor, k: &Tensor, v: &Tensor, softmax_scale: f32, causal: bool, ) -> Result<Tensor> { candle_flash_attn::flash_attn(q, k, v, softmax_scale, causal) } #[cfg(not(feature = "flash-attn"))] fn flash_attn(_: &Tensor, _: &Tensor, _: &Tensor, _: f32, _: bool) -> Result<Tensor> { unimplemented!("compile with '--features flash-attn'") } #[derive(Debug)] pub struct CrossAttention { to_q: nn::Linear, to_k: nn::Linear, to_v: nn::Linear, to_out: nn::Linear, heads: usize, scale: f64, slice_size: Option<usize>, span: tracing::Span, span_attn: tracing::Span, span_softmax: tracing::Span, use_flash_attn: bool, } impl CrossAttention { // Defaults should be heads = 8, dim_head = 64, context_dim = None pub fn new( vs: nn::VarBuilder, query_dim: usize, context_dim: Option<usize>, heads: usize, dim_head: usize, slice_size: Option<usize>, use_flash_attn: bool, ) -> Result<Self> { let inner_dim = dim_head * heads; let context_dim = context_dim.unwrap_or(query_dim); let scale = 1.0 / f64::sqrt(dim_head as f64); let to_q = nn::linear_no_bias(query_dim, inner_dim, vs.pp("to_q"))?; let to_k = nn::linear_no_bias(context_dim, inner_dim, vs.pp("to_k"))?; let to_v = nn::linear_no_bias(context_dim, inner_dim, vs.pp("to_v"))?; let to_out = nn::linear(inner_dim, query_dim, vs.pp("to_out.0"))?; let span = tracing::span!(tracing::Level::TRACE, "xa"); let span_attn = tracing::span!(tracing::Level::TRACE, "xa-attn"); let span_softmax = tracing::span!(tracing::Level::TRACE, "xa-softmax"); Ok(Self { to_q, to_k, to_v, to_out, heads, scale, slice_size, span, span_attn, span_softmax, use_flash_attn, }) } fn reshape_heads_to_batch_dim(&self, xs: &Tensor) -> Result<Tensor> { let (batch_size, seq_len, dim) = xs.dims3()?; xs.reshape((batch_size, seq_len, self.heads, dim / 
self.heads))? .transpose(1, 2)? .reshape((batch_size * self.heads, seq_len, dim / self.heads)) } fn reshape_batch_dim_to_heads(&self, xs: &Tensor) -> Result<Tensor> { let (batch_size, seq_len, dim) = xs.dims3()?; xs.reshape((batch_size / self.heads, self.heads, seq_len, dim))? .transpose(1, 2)? .reshape((batch_size / self.heads, seq_len, dim * self.heads)) } fn sliced_attention( &self, query: &Tensor, key: &Tensor, value: &Tensor, slice_size: usize, ) -> Result<Tensor> { let batch_size_attention = query.dim(0)?; let mut hidden_states = Vec::with_capacity(batch_size_attention / slice_size); let in_dtype = query.dtype(); let query = query.to_dtype(DType::F32)?; let key = key.to_dtype(DType::F32)?; let value = value.to_dtype(DType::F32)?; for i in 0..batch_size_attention / slice_size { let start_idx = i * slice_size; let end_idx = (i + 1) * slice_size; let xs = query .i(start_idx..end_idx)? .matmul(&(key.i(start_idx..end_idx)?.t()? * self.scale)?)?; let xs = nn::ops::softmax(&xs, D::Minus1)?.matmul(&value.i(start_idx..end_idx)?)?; hidden_states.push(xs) } let hidden_states = Tensor::stack(&hidden_states, 0)?.to_dtype(in_dtype)?; self.reshape_batch_dim_to_heads(&hidden_states) } fn attention(&self, query: &Tensor, key: &Tensor, value: &Tensor) -> Result<Tensor> { let _enter = self.span_attn.enter(); let xs = if self.use_flash_attn { let init_dtype = query.dtype(); let q = query .to_dtype(candle::DType::F16)? .unsqueeze(0)? .transpose(1, 2)?; let k = key .to_dtype(candle::DType::F16)? .unsqueeze(0)? .transpose(1, 2)?; let v = value .to_dtype(candle::DType::F16)? .unsqueeze(0)? .transpose(1, 2)?; flash_attn(&q, &k, &v, self.scale as f32, false)? .transpose(1, 2)? .squeeze(0)? .to_dtype(init_dtype)? } else { let in_dtype = query.dtype(); let query = query.to_dtype(DType::F32)?; let key = key.to_dtype(DType::F32)?; let value = value.to_dtype(DType::F32)?; let xs = query.matmul(&(key.t()? * self.scale)?)?; let xs = { let _enter = self.span_softmax.enter(); nn::ops::softmax_last_dim(&xs)? }; xs.matmul(&value)?.to_dtype(in_dtype)? }; self.reshape_batch_dim_to_heads(&xs) } pub fn forward(&self, xs: &Tensor, context: Option<&Tensor>) -> Result<Tensor> { let _enter = self.span.enter(); let query = self.to_q.forward(xs)?; let context = context.unwrap_or(xs).contiguous()?; let key = self.to_k.forward(&context)?; let value = self.to_v.forward(&context)?; let query = self.reshape_heads_to_batch_dim(&query)?; let key = self.reshape_heads_to_batch_dim(&key)?; let value = self.reshape_heads_to_batch_dim(&value)?; let dim0 = query.dim(0)?; let slice_size = self.slice_size.and_then(|slice_size| { if dim0 < slice_size { None } else { Some(slice_size) } }); let xs = match slice_size { None => self.attention(&query, &key, &value)?, Some(slice_size) => self.sliced_attention(&query, &key, &value, slice_size)?, }; self.to_out.forward(&xs) } } /// A basic Transformer block. 
#[derive(Debug)] struct BasicTransformerBlock { attn1: CrossAttention, ff: FeedForward, attn2: CrossAttention, norm1: nn::LayerNorm, norm2: nn::LayerNorm, norm3: nn::LayerNorm, span: tracing::Span, } impl BasicTransformerBlock { fn new( vs: nn::VarBuilder, dim: usize, n_heads: usize, d_head: usize, context_dim: Option<usize>, sliced_attention_size: Option<usize>, use_flash_attn: bool, ) -> Result<Self> { let attn1 = CrossAttention::new( vs.pp("attn1"), dim, None, n_heads, d_head, sliced_attention_size, use_flash_attn, )?; let ff = FeedForward::new(vs.pp("ff"), dim, None, 4)?; let attn2 = CrossAttention::new( vs.pp("attn2"), dim, context_dim, n_heads, d_head, sliced_attention_size, use_flash_attn, )?; let norm1 = nn::layer_norm(dim, 1e-5, vs.pp("norm1"))?; let norm2 = nn::layer_norm(dim, 1e-5, vs.pp("norm2"))?; let norm3 = nn::layer_norm(dim, 1e-5, vs.pp("norm3"))?; let span = tracing::span!(tracing::Level::TRACE, "basic-transformer"); Ok(Self { attn1, ff, attn2, norm1, norm2, norm3, span, }) } fn forward(&self, xs: &Tensor, context: Option<&Tensor>) -> Result<Tensor> { let _enter = self.span.enter(); let xs = (self.attn1.forward(&self.norm1.forward(xs)?, None)? + xs)?; let xs = (self.attn2.forward(&self.norm2.forward(&xs)?, context)? + xs)?; self.ff.forward(&self.norm3.forward(&xs)?)? + xs } } #[derive(Debug, Clone, Copy)] pub struct SpatialTransformerConfig { pub depth: usize, pub num_groups: usize, pub context_dim: Option<usize>, pub sliced_attention_size: Option<usize>, pub use_linear_projection: bool, } impl Default for SpatialTransformerConfig { fn default() -> Self { Self { depth: 1, num_groups: 32, context_dim: None, sliced_attention_size: None, use_linear_projection: false, } } } #[derive(Debug)] enum Proj { Conv2d(nn::Conv2d), Linear(nn::Linear), } // Aka Transformer2DModel #[derive(Debug)] pub struct SpatialTransformer { norm: nn::GroupNorm, proj_in: Proj, transformer_blocks: Vec<BasicTransformerBlock>, proj_out: Proj, span: tracing::Span, pub config: SpatialTransformerConfig, } impl SpatialTransformer { pub fn new( vs: nn::VarBuilder, in_channels: usize, n_heads: usize, d_head: usize, use_flash_attn: bool, config: SpatialTransformerConfig, ) -> Result<Self> { let inner_dim = n_heads * d_head; let norm = nn::group_norm(config.num_groups, in_channels, 1e-6, vs.pp("norm"))?; let proj_in = if config.use_linear_projection { Proj::Linear(nn::linear(in_channels, inner_dim, vs.pp("proj_in"))?) } else { Proj::Conv2d(nn::conv2d( in_channels, inner_dim, 1, Default::default(), vs.pp("proj_in"), )?) }; let mut transformer_blocks = vec![]; let vs_tb = vs.pp("transformer_blocks"); for index in 0..config.depth { let tb = BasicTransformerBlock::new( vs_tb.pp(index.to_string()), inner_dim, n_heads, d_head, config.context_dim, config.sliced_attention_size, use_flash_attn, )?; transformer_blocks.push(tb) } let proj_out = if config.use_linear_projection { Proj::Linear(nn::linear(in_channels, inner_dim, vs.pp("proj_out"))?) } else { Proj::Conv2d(nn::conv2d( inner_dim, in_channels, 1, Default::default(), vs.pp("proj_out"), )?) 
}; let span = tracing::span!(tracing::Level::TRACE, "spatial-transformer"); Ok(Self { norm, proj_in, transformer_blocks, proj_out, span, config, }) } pub fn forward(&self, xs: &Tensor, context: Option<&Tensor>) -> Result<Tensor> { let _enter = self.span.enter(); let (batch, _channel, height, weight) = xs.dims4()?; let residual = xs; let xs = self.norm.forward(xs)?; let (inner_dim, xs) = match &self.proj_in { Proj::Conv2d(p) => { let xs = p.forward(&xs)?; let inner_dim = xs.dim(1)?; let xs = xs .transpose(1, 2)? .t()? .reshape((batch, height * weight, inner_dim))?; (inner_dim, xs) } Proj::Linear(p) => { let inner_dim = xs.dim(1)?; let xs = xs .transpose(1, 2)? .t()? .reshape((batch, height * weight, inner_dim))?; (inner_dim, p.forward(&xs)?) } }; let mut xs = xs; for block in self.transformer_blocks.iter() { xs = block.forward(&xs, context)? } let xs = match &self.proj_out { Proj::Conv2d(p) => p.forward( &xs.reshape((batch, height, weight, inner_dim))? .t()? .transpose(1, 2)?, )?, Proj::Linear(p) => p .forward(&xs)? .reshape((batch, height, weight, inner_dim))? .t()? .transpose(1, 2)?, }; xs + residual } } /// Configuration for an attention block. #[derive(Debug, Clone, Copy)] pub struct AttentionBlockConfig { pub num_head_channels: Option<usize>, pub num_groups: usize, pub rescale_output_factor: f64, pub eps: f64, } impl Default for AttentionBlockConfig { fn default() -> Self { Self { num_head_channels: None, num_groups: 32, rescale_output_factor: 1., eps: 1e-5, } } } #[derive(Debug)] pub struct AttentionBlock { group_norm: nn::GroupNorm, query: nn::Linear, key: nn::Linear, value: nn::Linear, proj_attn: nn::Linear, channels: usize, num_heads: usize, span: tracing::Span, config: AttentionBlockConfig, } // In the .safetensor weights of official Stable Diffusion 3 Medium Huggingface repo // https://huggingface.co/stabilityai/stable-diffusion-3-medium // Linear layer may use a different dimension for the weight in the linear, which is // incompatible with the current implementation of the nn::linear constructor. // This is a workaround to handle the different dimensions. fn get_qkv_linear(channels: usize, vs: nn::VarBuilder) -> Result<nn::Linear> { match vs.get((channels, channels), "weight") { Ok(_) => nn::linear(channels, channels, vs), Err(_) => { let weight = vs .get((channels, channels, 1, 1), "weight")? .reshape((channels, channels))?; let bias = vs.get((channels,), "bias")?; Ok(nn::Linear::new(weight, Some(bias))) } } } impl AttentionBlock { pub fn new(vs: nn::VarBuilder, channels: usize, config: AttentionBlockConfig) -> Result<Self> { let num_head_channels = config.num_head_channels.unwrap_or(channels); let num_heads = channels / num_head_channels; let group_norm = nn::group_norm(config.num_groups, channels, config.eps, vs.pp("group_norm"))?; let (q_path, k_path, v_path, out_path) = if vs.contains_tensor("to_q.weight") { ("to_q", "to_k", "to_v", "to_out.0") } else { ("query", "key", "value", "proj_attn") }; let query = get_qkv_linear(channels, vs.pp(q_path))?; let key = get_qkv_linear(channels, vs.pp(k_path))?; let value = get_qkv_linear(channels, vs.pp(v_path))?; let proj_attn = get_qkv_linear(channels, vs.pp(out_path))?; let span = tracing::span!(tracing::Level::TRACE, "attn-block"); Ok(Self { group_norm, query, key, value, proj_attn, channels, num_heads, span, config, }) } fn transpose_for_scores(&self, xs: Tensor) -> Result<Tensor> { let (batch, t, h_times_d) = xs.dims3()?; xs.reshape((batch, t, self.num_heads, h_times_d / self.num_heads))? 
.transpose(1, 2) } } impl Module for AttentionBlock { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let in_dtype = xs.dtype(); let residual = xs; let (batch, channel, height, width) = xs.dims4()?; let xs = self .group_norm .forward(xs)? .reshape((batch, channel, height * width))? .transpose(1, 2)?; let query_proj = self.query.forward(&xs)?; let key_proj = self.key.forward(&xs)?; let value_proj = self.value.forward(&xs)?; let query_states = self .transpose_for_scores(query_proj)? .to_dtype(DType::F32)?; let key_states = self.transpose_for_scores(key_proj)?.to_dtype(DType::F32)?; let value_states = self .transpose_for_scores(value_proj)? .to_dtype(DType::F32)?; // scale is applied twice, hence the -0.25 here rather than -0.5. // https://github.com/huggingface/diffusers/blob/d3d22ce5a894becb951eec03e663951b28d45135/src/diffusers/models/attention.py#L87 let scale = f64::powf(self.channels as f64 / self.num_heads as f64, -0.25); let attention_scores = (query_states * scale)?.matmul(&(key_states.t()? * scale)?)?; let attention_probs = nn::ops::softmax(&attention_scores, D::Minus1)?; // TODO: revert the call to force_contiguous once the three matmul kernels have been // adapted to handle layout with some dims set to 1. let xs = attention_probs.matmul(&value_states)?; let xs = xs.to_dtype(in_dtype)?; let xs = xs.transpose(1, 2)?.contiguous()?; let xs = xs.flatten_from(D::Minus2)?; let xs = self .proj_attn .forward(&xs)? .t()? .reshape((batch, channel, height, width))?; (xs + residual)? / self.config.rescale_output_factor } }
candle/candle-transformers/src/models/stable_diffusion/attention.rs/0
{ "file_path": "candle/candle-transformers/src/models/stable_diffusion/attention.rs", "repo_id": "candle", "token_count": 9788 }
//! Stella v5 model implementation. //! //! Stella is a dense text embedding model optimized for retrieval and similarity tasks. //! This implementation provides support for multiple embedding dimensions. //! //! Key characteristics: //! - Dense text embeddings optimized for similarity search //! - Multiple output dimension support (256 to 8192) //! - Grouped query attention (GQA) //! - RMSNorm for layer normalization //! - Rotary positional embeddings (RoPE) //! //! References: //! - [MRL Framework](https://arxiv.org/abs/2205.13147) //! - [Model Card](https://huggingface.co/dunzhang/stella_en_1.5B_v5) //! use crate::models::with_tracing::{linear, linear_no_bias, Linear, RmsNorm}; use candle::{DType, Device, Error, IndexOp, Module, Result, Tensor, D}; use candle_nn::{layer_norm, Activation, LayerNorm, VarBuilder}; use std::sync::Arc; // internal representation for identifying which model is being used #[derive(Debug, Copy, Clone, PartialEq, serde::Deserialize)] pub enum ModelVariant { Large, // 1.5B Small, // 400M } impl Default for ModelVariant { fn default() -> Self { Self::Large } } // Same as `qwen2` family of models with the exception being the `embed_head` // The final `output` causal modelling head is swapped with a learned `dense` layer, `embed_head` #[derive(Debug, Default, Clone, PartialEq, serde::Deserialize)] pub struct Config { pub variant: ModelVariant, pub vocab_size: usize, pub hidden_size: usize, pub intermediate_size: usize, pub num_hidden_layers: usize, pub num_attention_heads: usize, pub max_position_embeddings: usize, pub rope_theta: f64, pub embed_head: EmbedHead, pub norm_eps: f64, // RMSNorm for 1.5B || LayerNorm for 400M pub activation_fn: Activation, // Silu for 1.5B || Gelu for 400M // Unique to 1.5B pub num_key_value_heads: usize, // Unique to 400M pub type_vocab_size: usize, pub scaling_factor: f64, } // Excerpt from `stella` model card: // `Stella_en_1.5B_v5` models have been trained on [MRL](https://arxiv.org/abs/2205.13147) enabling multiple output dimensions // Embed head represents the config for various embedding dims supported #[derive(Debug, Default, Clone, PartialEq, serde::Deserialize)] pub struct EmbedHead { pub in_features: usize, pub out_features: usize, } /// An enum variant representing the Embedding head dimensions `stella` is trained on /// As the [model-card](https://huggingface.co/dunzhang/stella_en_1.5B_v5#introduction) suggests, D1024 is good enough for most cases #[derive(Debug, Clone, Copy)] pub enum EmbedDim { Dim256, Dim768, Dim1024, Dim2048, Dim4096, Dim6144, Dim8192, } impl Default for EmbedDim { fn default() -> Self { Self::Dim1024 } } impl EmbedDim { pub fn config(&self, in_features: usize) -> EmbedHead { EmbedHead { in_features, out_features: match &self { Self::Dim256 => 256, Self::Dim768 => 768, Self::Dim1024 => 1024, Self::Dim2048 => 2048, Self::Dim4096 => 4096, Self::Dim6144 => 6144, Self::Dim8192 => 8192, }, } } } // Initialize a new `stella_en` model - with 400M variant or 1.5B variant impl Config { /// Initialize a new `stella_en_1.5B_v5`` model with given embedding dim pub fn new_1_5_b_v5(embed_dim: EmbedDim) -> Self { // Representing config.json at https://huggingface.co/dunzhang/stella_en_1.5B_v5/blob/main/config.json // Removed `sliding_window` related config which is basically being carried forward from `qwen2` but not used here Self { variant: ModelVariant::Large, activation_fn: candle_nn::Activation::Silu, vocab_size: 151646, hidden_size: 1536, intermediate_size: 8960, num_hidden_layers: 28, num_attention_heads: 12, 
num_key_value_heads: 2, max_position_embeddings: 131072, rope_theta: 1000000., norm_eps: 1e-06, embed_head: embed_dim.config(1536), ..Default::default() } } /// Initialize new `stella_en_400M_v5` pub fn new_400_m_v5(embed_dim: EmbedDim) -> Self { Self { variant: ModelVariant::Small, vocab_size: 30528, hidden_size: 1024, intermediate_size: 4096, num_hidden_layers: 24, num_attention_heads: 16, max_position_embeddings: 8192, type_vocab_size: 2, norm_eps: 1e-12, scaling_factor: 2.0, rope_theta: 160000.0, activation_fn: Activation::Gelu, embed_head: embed_dim.config(1024), ..Default::default() } } } #[derive(Debug, Clone)] struct RotaryEmbedding { sin: Tensor, cos: Tensor, } impl RotaryEmbedding { fn new(dtype: DType, cfg: &Config, dev: &Device) -> Result<Self> { let dim = cfg.hidden_size / cfg.num_attention_heads; // Factoring in `scaling factor` for `400M` variant let max_seq_len = if cfg.scaling_factor == 0. { cfg.max_position_embeddings } else { ((cfg.max_position_embeddings as f64) * cfg.scaling_factor) as usize }; // let rot_dim = if cfg.variant == ModelVariant::Small { dim / 2 } else { dim }; let inv_freq: Vec<_> = (0..dim) .step_by(2) .map(|i| { // Scaled rope_theta for 400M variant let rope_theta = if cfg.scaling_factor == 0. { cfg.rope_theta } else { cfg.rope_theta * cfg.scaling_factor }; let mut freq = 1. / rope_theta.powf(i as f64 / dim as f64); if cfg.scaling_factor != 0. { freq /= cfg.scaling_factor.powf(2.0 / (dim as f64)) } freq as f32 }) .collect(); let inv_freq_len = inv_freq.len(); let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(dtype)?; // Calculate position embeddings with scaled sequence length let t = Tensor::arange(0u32, max_seq_len as u32, dev)? .to_dtype(dtype)? .reshape((max_seq_len, 1))?; let freqs = t.matmul(&inv_freq)?; // if cfg.variant == ModelVariant::Small { // freqs = Tensor::cat(&[&freqs, &freqs], 1)? 
// } Ok(Self { sin: freqs.sin()?, cos: freqs.cos()?, }) } // TODO: re-visit this fn apply_rotary_emb_qkv(&self, q: &Tensor, k: &Tensor) -> Result<(Tensor, Tensor)> { let (_b_sz, _h, seq_len, _n_embd) = q.dims4()?; let cos = self.cos.narrow(0, 0, seq_len)?; let sin = self.sin.narrow(0, 0, seq_len)?; let q_embed = candle_nn::rotary_emb::rope(&q.contiguous()?, &cos, &sin)?; let k_embed = candle_nn::rotary_emb::rope(&k.contiguous()?, &cos, &sin)?; Ok((q_embed, k_embed)) } } #[derive(Debug, Clone)] #[allow(clippy::upper_case_acronyms)] struct MLP { variant: ModelVariant, gate_proj: Linear, up_proj: Option<Linear>, // `up_proj` only for 1.5B variant down_proj: Linear, act_fn: Activation, } impl MLP { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let hidden_sz = cfg.hidden_size; let intermediate_sz = cfg.intermediate_size; let (gate_proj, up_proj, down_proj) = match cfg.variant { ModelVariant::Large => ( linear_no_bias(hidden_sz, intermediate_sz, vb.pp("gate_proj"))?, Some(linear_no_bias( hidden_sz, intermediate_sz, vb.pp("up_proj"), )?), linear_no_bias(intermediate_sz, hidden_sz, vb.pp("down_proj"))?, ), ModelVariant::Small => ( linear_no_bias(hidden_sz, intermediate_sz * 2, vb.pp("up_gate_proj"))?, None, linear(intermediate_sz, hidden_sz, vb.pp("down_proj"))?, ), }; Ok(Self { variant: cfg.variant, gate_proj, up_proj, down_proj, act_fn: cfg.activation_fn, }) } } impl Module for MLP { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let up = self.gate_proj.forward(xs)?; let (lhs, rhs) = match self.variant { ModelVariant::Large => { let lhs = up.apply(&self.act_fn)?; let rhs = xs.apply(self.up_proj.as_ref().unwrap())?; (lhs, rhs) } ModelVariant::Small => { // Get the dimensions let (_batch_size, _seq_len, hidden_dim) = up.dims3()?; let split_size = hidden_dim / 2; // Split along the last dimension (hidden_dim) let up_states = up.narrow(2, 0, split_size)?; let gate = up.narrow(2, split_size, split_size)?.apply(&self.act_fn)?; (up_states, gate) } }; (lhs * rhs)?.apply(&self.down_proj) } } #[derive(Debug, Clone)] struct Attention { qkv_proj: Linear, o_proj: Linear, num_heads: usize, num_kv_heads: usize, num_kv_groups: usize, head_dim: usize, hidden_size: usize, rotary_emb: Arc<RotaryEmbedding>, variant: ModelVariant, } impl Attention { fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> { let hidden_sz = cfg.hidden_size; let num_heads = cfg.num_attention_heads; let num_kv_heads = cfg.num_key_value_heads; let num_kv_groups = if num_kv_heads > 0 { num_heads / num_kv_heads } else { 0 }; let head_dim = hidden_sz / num_heads; let (qkv_proj, o_proj) = match cfg.variant { ModelVariant::Large => { // The 1.5B variant comes with separate `q, k, v` layers, let's merge it and standardize // Weights let q_w = vb .pp("q_proj") .get((num_heads * head_dim, hidden_sz), "weight")?; let k_w = vb .pp("k_proj") .get((num_kv_heads * head_dim, hidden_sz), "weight")?; let v_w = vb .pp("v_proj") .get((num_kv_heads * head_dim, hidden_sz), "weight")?; // Biases let q_b = vb.pp("q_proj").get(num_heads * head_dim, "bias")?; let k_b = vb.pp("k_proj").get(num_kv_heads * head_dim, "bias")?; let v_b = vb.pp("v_proj").get(num_kv_heads * head_dim, "bias")?; let qkv_w = Tensor::cat(&[&q_w, &k_w, &v_w], 0)?; let qkv_b = Tensor::cat(&[&q_b, &k_b, &v_b], 0)?; ( Linear::from_weights(qkv_w, Some(qkv_b)), linear_no_bias(num_heads * head_dim, hidden_sz, vb.pp("o_proj"))?, ) } ModelVariant::Small => ( linear(hidden_sz, 3 * num_heads * head_dim, vb.pp("qkv_proj"))?, linear(num_heads * head_dim, 
hidden_sz, vb.pp("o_proj"))?, ), }; Ok(Self { qkv_proj, o_proj, num_heads, num_kv_heads, num_kv_groups, head_dim, hidden_size: hidden_sz, rotary_emb, variant: cfg.variant, }) } fn forward(&mut self, xs: &Tensor, attention_mask: Option<&Tensor>) -> Result<Tensor> { let (b_sz, q_len, _) = xs.dims3()?; let qkv = self.qkv_proj.forward(xs)?; let n_kv_heads = match self.variant { ModelVariant::Large => self.num_kv_heads, ModelVariant::Small => self.num_heads, }; let (query_states, key_states, value_states) = match self.variant { ModelVariant::Large => { let q_sz = self.num_heads * self.head_dim; let kv_sz = n_kv_heads * self.head_dim; let q = qkv.narrow(D::Minus1, 0, q_sz)?.reshape(( b_sz, q_len, self.num_heads, self.head_dim, ))?; let k = qkv.narrow(D::Minus1, q_sz, kv_sz)?.reshape(( b_sz, q_len, n_kv_heads, self.head_dim, ))?; let v = qkv.narrow(D::Minus1, q_sz + kv_sz, kv_sz)?.reshape(( b_sz, q_len, n_kv_heads, self.head_dim, ))?; (q, k, v) } ModelVariant::Small => { // Split into Q, K, V and reshape to match PyTorch shapes let qkv = qkv.reshape((b_sz, q_len, 3, self.num_heads, self.head_dim))?; ( qkv.i((.., .., 0, .., ..))?, qkv.i((.., .., 1, .., ..))?, qkv.i((.., .., 2, .., ..))?, ) } }; let query_states = query_states.transpose(1, 2)?.contiguous()?; let key_states = key_states.transpose(1, 2)?.contiguous()?; let value_states = value_states.transpose(1, 2)?.contiguous()?; let (query_states, key_states) = self .rotary_emb .apply_rotary_emb_qkv(&query_states, &key_states)?; // The 1.5B is expected to have grouped query attention let (key_states, value_states) = if self.variant == ModelVariant::Large { ( crate::utils::repeat_kv(key_states, self.num_kv_groups)?.contiguous()?, crate::utils::repeat_kv(value_states, self.num_kv_groups)?.contiguous()?, ) } else { (key_states, value_states) }; let attn_output = { let scale = 1f64 / f64::sqrt(self.head_dim as f64); let attn_weights = query_states.matmul(&key_states.transpose(2, 3)?)?; let attn_weights = (attn_weights * scale)?; let attn_weights = match attention_mask { None => attn_weights, Some(mask) => attn_weights.broadcast_add(mask)?, }; let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?; attn_weights.matmul(&value_states)? }; attn_output .transpose(1, 2)? .reshape((b_sz, q_len, self.hidden_size))? 
.apply(&self.o_proj) } } #[derive(Debug, Clone)] enum NormType { Layer(LayerNorm), Rms(RmsNorm), } #[derive(Debug, Clone)] struct Layer { variant: ModelVariant, attention: Attention, mlp: MLP, // For 1.5B: this is `input_layernorm` // For 400M: this is `output_layernorm` layernorm: NormType, post_attention_layernorm: NormType, } impl Layer { fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> { let attention = Attention::new( rotary_emb, cfg, vb.pp(if cfg.variant == ModelVariant::Large { "self_attn" } else { "attention" }), )?; let mlp = MLP::new(cfg, vb.pp("mlp"))?; let (layernorm, post_attention_layernorm) = match cfg.variant { ModelVariant::Large => ( NormType::Rms(RmsNorm::new( cfg.hidden_size, cfg.norm_eps, vb.pp("input_layernorm"), )?), NormType::Rms(RmsNorm::new( cfg.hidden_size, cfg.norm_eps, vb.pp("post_attention_layernorm"), )?), ), ModelVariant::Small => ( NormType::Layer(layer_norm( cfg.hidden_size, candle_nn::LayerNormConfig { eps: cfg.norm_eps, ..Default::default() }, vb.pp("mlp_ln"), )?), NormType::Layer(layer_norm( cfg.hidden_size, candle_nn::LayerNormConfig { eps: cfg.norm_eps, ..Default::default() }, vb.pp("attn_ln"), )?), ), }; Ok(Self { variant: cfg.variant, attention, mlp, layernorm, post_attention_layernorm, }) } fn forward(&mut self, xs: &Tensor, attention_mask: Option<&Tensor>) -> Result<Tensor> { // Here, the application of normalizations and activation calculations differ // For Large [1.5B]: // residual = x // state = other_layernorm(xs) // state = attention(state) // state += residual // residual = state // state = mlp(attention_layernorm(state)) // -> residual + state // For Small [400M]: // residual = x; // state = attention(x) // state += residual // state = attention_layernorm(state) // residual = state // state = mlp(state) // state += residual // -> other_layernorm(state) let residual = xs; match self.variant { ModelVariant::Large => { let (attn_ln, input_ln) = if let (NormType::Rms(attn_ln), NormType::Rms(input_ln)) = (&self.post_attention_layernorm, &self.layernorm) { (attn_ln, input_ln) } else { return Err(candle::error::Error::Msg( "Stella 1.5B expects RMSNorm".to_string(), )); }; let xs = input_ln.forward(xs)?; let xs = (self.attention.forward(&xs, attention_mask)? + residual)?; let residual = &xs; let xs = xs.apply(attn_ln)?.apply(&self.mlp)?; residual + xs } ModelVariant::Small => { let (attn_ln, output_ln) = if let (NormType::Layer(attn_ln), NormType::Layer(input_ln)) = (&self.post_attention_layernorm, &self.layernorm) { (attn_ln, input_ln) } else { return Err(candle::error::Error::Msg( "Stella 400M expects RMSNorm".to_string(), )); }; let xs = (self.attention.forward(xs, attention_mask)? + residual)?; let xs = attn_ln.forward(&xs)?; let residual = &xs; let xs = (self.mlp.forward(&xs)? 
+ residual)?;

                output_ln.forward(&xs)
            }
        }
    }
}

#[derive(Debug, Clone)]
pub struct Embeddings {
    variant: ModelVariant,
    // For 1.5B: this is the `embed_tokens`
    // For 400M: this is the `word_embeddings`
    embeddings: candle_nn::Embedding,
    // The following fields are specific to the 400M variant
    token_type_embeddings: Option<candle_nn::Embedding>,
    layer_norm: Option<LayerNorm>,
    position_ids: Option<Tensor>,
}

impl Embeddings {
    pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let (embeddings, token_type_embeddings, layer_norm, position_ids) = match cfg.variant {
            ModelVariant::Large => (
                candle_nn::embedding(cfg.vocab_size, cfg.hidden_size, vb.pp("embed_tokens"))?,
                None,
                None,
                None,
            ),
            ModelVariant::Small => {
                let vb = vb.pp("embeddings");
                let weight = vb.pp("LayerNorm").get_with_hints(
                    cfg.hidden_size,
                    "weight",
                    candle_nn::Init::Const(1.0),
                )?;
                let bias = vb.pp("LayerNorm").get_with_hints(
                    cfg.hidden_size,
                    "bias",
                    candle_nn::Init::Const(0.0),
                )?;
                let dev = bias.device().clone();
                let layer_norm = candle_nn::LayerNorm::new(weight, bias, cfg.norm_eps);

                (
                    candle_nn::embedding(
                        cfg.vocab_size,
                        cfg.hidden_size,
                        vb.pp("word_embeddings"),
                    )?,
                    Some(candle_nn::embedding(
                        cfg.type_vocab_size,
                        cfg.hidden_size,
                        vb.pp("token_type_embeddings"),
                    )?),
                    Some(layer_norm),
                    Some(Tensor::arange(
                        0u32,
                        cfg.max_position_embeddings as u32,
                        &dev,
                    )?),
                )
            }
        };

        Ok(Self {
            variant: cfg.variant,
            embeddings,
            token_type_embeddings,
            layer_norm,
            position_ids,
        })
    }
}

impl Module for Embeddings {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let embd = self.embeddings.forward(xs)?;
        // For 1.5B just forward the embeddings
        if self.variant == ModelVariant::Large {
            return Ok(embd);
        }

        let (token_type_embed, layer_norm, pos_ids) = if let (
            Some(token_type_embd),
            Some(layer_norm),
            Some(position_ids),
        ) = (
            &self.token_type_embeddings,
            &self.layer_norm,
            &self.position_ids,
        ) {
            (token_type_embd, layer_norm, position_ids)
        } else {
            return Err(Error::Msg(
                "Stella 400M requires `token_type_embeddings`, `layer_norm` and `position_ids`"
                    .to_string(),
            ));
        };

        let (batch_size, seq_length) = xs.dims2()?;
        let pos_ids = pos_ids
            .as_ref()
            .narrow(0, 0, seq_length)?
            .expand((batch_size, seq_length))?;

        layer_norm.forward(&embd.add(&token_type_embed.forward(&pos_ids.zeros_like()?)?)?)
} } #[derive(Debug, Clone)] pub struct Model { embeddings: Embeddings, layers: Vec<Layer>, norm: Option<RmsNorm>, device: Device, dtype: DType, } impl Model { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let vb_m = match cfg.variant { ModelVariant::Large => vb.pp("model"), ModelVariant::Small => vb.pp("new"), }; // let embed_tokens = // candle_nn::embedding(cfg.vocab_size, cfg.hidden_size, vb_m.pp("embed_tokens"))?; let embeddings = Embeddings::new(cfg, vb_m.clone())?; let rotary_emb = Arc::new(RotaryEmbedding::new(vb.dtype(), cfg, vb_m.device())?); let mut layers = Vec::with_capacity(cfg.num_hidden_layers); let vb_l = match cfg.variant { ModelVariant::Large => vb_m.pp("layers"), ModelVariant::Small => vb_m.pp("encoder").pp("layer"), }; for layer_idx in 0..cfg.num_hidden_layers { let layer = Layer::new(rotary_emb.clone(), cfg, vb_l.pp(layer_idx))?; layers.push(layer) } let norm = match cfg.variant { ModelVariant::Large => Some(RmsNorm::new( cfg.hidden_size, cfg.norm_eps, vb_m.pp("norm"), )?), ModelVariant::Small => None, }; Ok(Self { embeddings, layers, norm, device: vb.device().clone(), dtype: vb.dtype(), }) } fn prepare_attention_mask(&self, attn_mask: &Tensor) -> Result<Tensor> { let (b_sz, sql_len) = attn_mask.dims2()?; let mut mask: Vec<Tensor> = vec![]; for b in 0..b_sz { mask.push(attn_mask.i((b, ..))?.expand((1, 1, sql_len, sql_len))?); } let mask = Tensor::cat(&mask, 0)?; let on_true = mask.zeros_like()?.to_dtype(self.dtype)?; let on_false = Tensor::new(f32::NEG_INFINITY, &self.device)? .broadcast_as(mask.shape())? .to_dtype(self.dtype)?; mask.where_cond(&on_true, &on_false) } pub fn forward(&mut self, input_ids: &Tensor, mask: &Tensor) -> Result<Tensor> { let (_, seq_len) = input_ids.dims2()?; let attention_mask = if seq_len <= 1 { None } else { // This is not a `causal language modelling` task, we'll need to prepare a `non-causal` attention Some(self.prepare_attention_mask(mask)?) }; let mut xs = self.embeddings.forward(input_ids)?; for layer in self.layers.iter_mut() { xs = layer.forward(&xs, attention_mask.as_ref())? } if let Some(n) = &self.norm { xs.apply(n) } else { Ok(xs) } } } #[derive(Debug)] pub struct EmbeddingModel { base_model: Model, lm_head: Linear, } impl EmbeddingModel { pub fn new(cfg: &Config, base_vb: VarBuilder, embed_vb: VarBuilder) -> Result<Self> { let base_model = Model::new(cfg, base_vb.clone())?; let lm_head = linear( cfg.embed_head.in_features, cfg.embed_head.out_features, embed_vb.pp("linear"), )?; Ok(Self { base_model, lm_head, }) } pub fn forward(&mut self, input_ids: &Tensor, mask: &Tensor) -> Result<Tensor> { let x = self.base_model.forward(input_ids, mask)?; let x = self.pool(&x, mask)?; // No matter what keeping the final activations as F32 helps with the accuracy self.lm_head.forward(&x.to_dtype(DType::F32)?) // [B_sz, dim_size] } /// Same as forward pass but normalizes the output pub fn forward_norm(&mut self, input_ids: &Tensor, mask: &Tensor) -> Result<Tensor> { let x = self.forward(input_ids, mask)?; // Normalize x.broadcast_div(&x.sqr()?.sum_keepdim(1)?.sqrt()?) } fn pool(&self, x: &Tensor, mask: &Tensor) -> Result<Tensor> { let mask = mask.to_dtype(x.dtype())?; // [B_Sz, Seq_len] let (batch_size, seq_len, hidden_dim) = x.dims3()?; // expanding the shape of the mask from [B_Sz, Seq_len] -> [B_Sz, Seq_len, Hidden_size] let mask_expanded = mask .unsqueeze(2)? .broadcast_as((batch_size, seq_len, hidden_dim))?; // [B_Sz, Seq_len, Hidden_dim] let x = (x * &mask_expanded)?; // Sum let sum_mask = mask .sum(1)? .unsqueeze(1)? 
.expand((batch_size, hidden_dim))?; x.sum(1)? / sum_mask } }
candle/candle-transformers/src/models/stella_en_v5.rs/0
{ "file_path": "candle/candle-transformers/src/models/stella_en_v5.rs", "repo_id": "candle", "token_count": 14806 }
use super::common::{AttnBlock, ResBlock, TimestepBlock}; use candle::{DType, Result, Tensor, D}; use candle_nn::VarBuilder; #[derive(Debug)] struct Block { res_block: ResBlock, ts_block: TimestepBlock, attn_block: AttnBlock, } #[derive(Debug)] pub struct WPrior { projection: candle_nn::Conv2d, cond_mapper_lin1: candle_nn::Linear, cond_mapper_lin2: candle_nn::Linear, blocks: Vec<Block>, out_ln: super::common::WLayerNorm, out_conv: candle_nn::Conv2d, c_r: usize, } impl WPrior { #[allow(clippy::too_many_arguments)] pub fn new( c_in: usize, c: usize, c_cond: usize, c_r: usize, depth: usize, nhead: usize, use_flash_attn: bool, vb: VarBuilder, ) -> Result<Self> { let projection = candle_nn::conv2d(c_in, c, 1, Default::default(), vb.pp("projection"))?; let cond_mapper_lin1 = candle_nn::linear(c_cond, c, vb.pp("cond_mapper.0"))?; let cond_mapper_lin2 = candle_nn::linear(c, c, vb.pp("cond_mapper.2"))?; let out_ln = super::common::WLayerNorm::new(c)?; let out_conv = candle_nn::conv2d(c, c_in * 2, 1, Default::default(), vb.pp("out.1"))?; let mut blocks = Vec::with_capacity(depth); for index in 0..depth { let res_block = ResBlock::new(c, 0, 3, vb.pp(format!("blocks.{}", 3 * index)))?; let ts_block = TimestepBlock::new(c, c_r, vb.pp(format!("blocks.{}", 3 * index + 1)))?; let attn_block = AttnBlock::new( c, c, nhead, true, use_flash_attn, vb.pp(format!("blocks.{}", 3 * index + 2)), )?; blocks.push(Block { res_block, ts_block, attn_block, }) } Ok(Self { projection, cond_mapper_lin1, cond_mapper_lin2, blocks, out_ln, out_conv, c_r, }) } pub fn gen_r_embedding(&self, r: &Tensor) -> Result<Tensor> { const MAX_POSITIONS: usize = 10000; let r = (r * MAX_POSITIONS as f64)?; let half_dim = self.c_r / 2; let emb = (MAX_POSITIONS as f64).ln() / (half_dim - 1) as f64; let emb = (Tensor::arange(0u32, half_dim as u32, r.device())?.to_dtype(DType::F32)? * -emb)? .exp()?; let emb = r.unsqueeze(1)?.broadcast_mul(&emb.unsqueeze(0)?)?; let emb = Tensor::cat(&[emb.sin()?, emb.cos()?], 1)?; let emb = if self.c_r % 2 == 1 { emb.pad_with_zeros(D::Minus1, 0, 1)? } else { emb }; emb.to_dtype(r.dtype()) } pub fn forward(&self, xs: &Tensor, r: &Tensor, c: &Tensor) -> Result<Tensor> { let x_in = xs; let mut xs = xs.apply(&self.projection)?; let c_embed = c .apply(&self.cond_mapper_lin1)? .apply(&|xs: &_| candle_nn::ops::leaky_relu(xs, 0.2))? .apply(&self.cond_mapper_lin2)?; let r_embed = self.gen_r_embedding(r)?; for block in self.blocks.iter() { xs = block.res_block.forward(&xs, None)?; xs = block.ts_block.forward(&xs, &r_embed)?; xs = block.attn_block.forward(&xs, &c_embed)?; } let ab = xs.apply(&self.out_ln)?.apply(&self.out_conv)?.chunk(2, 1)?; (x_in - &ab[0])? / ((&ab[1] - 1.)?.abs()? + 1e-5) } }
candle/candle-transformers/src/models/wuerstchen/prior.rs/0
{ "file_path": "candle/candle-transformers/src/models/wuerstchen/prior.rs", "repo_id": "candle", "token_count": 1920 }
use candle::{DType, Device, Tensor}; use candle_nn::VarBuilder; use candle_transformers::models::bert::{BertModel, Config}; use candle_wasm_example_bert::console_log; use tokenizers::{PaddingParams, Tokenizer}; use wasm_bindgen::prelude::*; #[wasm_bindgen] pub struct Model { bert: BertModel, tokenizer: Tokenizer, } #[wasm_bindgen] impl Model { #[wasm_bindgen(constructor)] pub fn load(weights: Vec<u8>, tokenizer: Vec<u8>, config: Vec<u8>) -> Result<Model, JsError> { console_error_panic_hook::set_once(); console_log!("loading model"); let device = &Device::Cpu; let vb = VarBuilder::from_buffered_safetensors(weights, DType::F32, device)?; let config: Config = serde_json::from_slice(&config)?; let tokenizer = Tokenizer::from_bytes(&tokenizer).map_err(|m| JsError::new(&m.to_string()))?; let bert = BertModel::load(vb, &config)?; Ok(Self { bert, tokenizer }) } pub fn get_embeddings(&mut self, input: JsValue) -> Result<JsValue, JsError> { let input: Params = serde_wasm_bindgen::from_value(input).map_err(|m| JsError::new(&m.to_string()))?; let sentences = input.sentences; let normalize_embeddings = input.normalize_embeddings; let device = &Device::Cpu; if let Some(pp) = self.tokenizer.get_padding_mut() { pp.strategy = tokenizers::PaddingStrategy::BatchLongest } else { let pp = PaddingParams { strategy: tokenizers::PaddingStrategy::BatchLongest, ..Default::default() }; self.tokenizer.with_padding(Some(pp)); } let tokens = self .tokenizer .encode_batch(sentences.to_vec(), true) .map_err(|m| JsError::new(&m.to_string()))?; let token_ids: Vec<Tensor> = tokens .iter() .map(|tokens| { let tokens = tokens.get_ids().to_vec(); Tensor::new(tokens.as_slice(), device) }) .collect::<Result<Vec<_>, _>>()?; let attention_mask: Vec<Tensor> = tokens .iter() .map(|tokens| { let tokens = tokens.get_attention_mask().to_vec(); Tensor::new(tokens.as_slice(), device) }) .collect::<Result<Vec<_>, _>>()?; let token_ids = Tensor::stack(&token_ids, 0)?; let attention_mask = Tensor::stack(&attention_mask, 0)?; let token_type_ids = token_ids.zeros_like()?; console_log!("running inference on batch {:?}", token_ids.shape()); let embeddings = self .bert .forward(&token_ids, &token_type_ids, Some(&attention_mask))?; console_log!("generated embeddings {:?}", embeddings.shape()); // Apply some avg-pooling by taking the mean embedding value for all tokens (including padding) let (_n_sentence, n_tokens, _hidden_size) = embeddings.dims3()?; let embeddings = (embeddings.sum(1)? / (n_tokens as f64))?; let embeddings = if normalize_embeddings { embeddings.broadcast_div(&embeddings.sqr()?.sum_keepdim(1)?.sqrt()?)? } else { embeddings }; let embeddings_data = embeddings.to_vec2()?; Ok(serde_wasm_bindgen::to_value(&Embeddings { data: embeddings_data, })?) } } #[derive(serde::Serialize, serde::Deserialize)] struct Embeddings { data: Vec<Vec<f32>>, } #[derive(serde::Serialize, serde::Deserialize)] pub struct Params { sentences: Vec<String>, normalize_embeddings: bool, } fn main() { console_error_panic_hook::set_once(); }
candle/candle-wasm-examples/bert/src/bin/m.rs/0
{ "file_path": "candle/candle-wasm-examples/bert/src/bin/m.rs", "repo_id": "candle", "token_count": 1752 }
import init, { Model } from "./build/m.js"; async function fetchArrayBuffer(url) { const cacheName = "llama2c-candle-cache"; const cache = await caches.open(cacheName); const cachedResponse = await cache.match(url); if (cachedResponse) { const data = await cachedResponse.arrayBuffer(); return new Uint8Array(data); } const res = await fetch(url, { cache: "force-cache" }); cache.put(url, res.clone()); return new Uint8Array(await res.arrayBuffer()); } class Llama2C { static instance = {}; static async getInstance(weightsURL, modelID, tokenizerURL) { // load individual modelID only once if (!this.instance[modelID]) { await init(); self.postMessage({ status: "loading", message: "Loading Model" }); const [weightsArrayU8, tokenizerArrayU8] = await Promise.all([ fetchArrayBuffer(weightsURL), fetchArrayBuffer(tokenizerURL), ]); this.instance[modelID] = new Model(weightsArrayU8, tokenizerArrayU8); } return this.instance[modelID]; } } let controller = null; self.addEventListener("message", (event) => { if (event.data.command === "start") { controller = new AbortController(); generate(event.data); } else if (event.data.command === "abort") { controller.abort(); } }); async function generate(data) { const { weightsURL, modelID, tokenizerURL, prompt, temp, top_p, repeatPenalty, seed, maxSeqLen, } = data; try { self.postMessage({ status: "loading", message: "Starting llama2.c" }); const model = await Llama2C.getInstance(weightsURL, modelID, tokenizerURL); self.postMessage({ status: "loading", message: "Initializing model" }); const firstToken = model.init_with_prompt( prompt, temp, top_p, repeatPenalty, seed ); const seq_len = model.get_seq_len(); let sentence = firstToken; let maxTokens = maxSeqLen ? maxSeqLen : seq_len - prompt.length - 1; let startTime = performance.now(); let tokensCount = 0; while (tokensCount < maxTokens) { await new Promise(async (resolve) => { if (controller && controller.signal.aborted) { self.postMessage({ status: "aborted", message: "Aborted", output: prompt + sentence, }); return; } const token = await model.next_token(); const tokensSec = ((tokensCount + 1) / (performance.now() - startTime)) * 1000; sentence += token; self.postMessage({ status: "generating", message: "Generating token", token: token, sentence: sentence, totalTime: performance.now() - startTime, tokensSec, prompt: prompt, }); setTimeout(resolve, 0); }); tokensCount++; } self.postMessage({ status: "complete", message: "complete", output: prompt + sentence, }); } catch (e) { self.postMessage({ error: e }); } }
candle/candle-wasm-examples/llama2-c/llama2cWorker.js/0
{ "file_path": "candle/candle-wasm-examples/llama2-c/llama2cWorker.js", "repo_id": "candle", "token_count": 1223 }
[package] name = "candle-wasm-example-phi" version.workspace = true edition.workspace = true description.workspace = true repository.workspace = true keywords.workspace = true categories.workspace = true license.workspace = true [dependencies] candle = { workspace = true } candle-nn = { workspace = true } candle-transformers = { workspace = true } tokenizers = { workspace = true, features = ["unstable_wasm"] } num-traits = { workspace = true } # App crates. anyhow = { workspace = true } byteorder = { workspace = true } getrandom = { version = "0.2", features = ["js"] } image = { workspace = true } log = { workspace = true } safetensors = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } # Wasm specific crates. console_error_panic_hook = "0.1.7" wasm-bindgen = "0.2.87" js-sys = "0.3.64"
candle/candle-wasm-examples/phi/Cargo.toml/0
{ "file_path": "candle/candle-wasm-examples/phi/Cargo.toml", "repo_id": "candle", "token_count": 278 }
//load Candle Bert Module wasm module let init, ModelConditionalGeneration; async function fetchArrayBuffer(url) { const cacheName = "t5-candle-cache"; const cache = await caches.open(cacheName); const cachedResponse = await cache.match(url); if (cachedResponse) { const data = await cachedResponse.arrayBuffer(); return new Uint8Array(data); } const res = await fetch(url, { cache: "force-cache" }); cache.put(url, res.clone()); return new Uint8Array(await res.arrayBuffer()); } class ConditionalGeneration { static instance = {}; static async getInstance(weightsURL, tokenizerURL, configURL, modelID) { if (modelID.includes("quantized")) { ({ default: init, ModelConditionalGeneration } = await import( "./build/m-quantized.js" )); } else { ({ default: init, ModelConditionalGeneration } = await import( "./build/m.js" )); } if (!this.instance[modelID]) { await init(); self.postMessage({ status: "loading", message: "Loading Model" }); const [weightsArrayU8, tokenizerArrayU8, configArrayU8] = await Promise.all([ fetchArrayBuffer(weightsURL), fetchArrayBuffer(tokenizerURL), fetchArrayBuffer(configURL), ]); this.instance[modelID] = new ModelConditionalGeneration( weightsArrayU8, tokenizerArrayU8, configArrayU8 ); } else { self.postMessage({ status: "ready", message: "Model Already Loaded" }); } return this.instance[modelID]; } } self.addEventListener("message", async (event) => { const { weightsURL, tokenizerURL, configURL, modelID, prompt, params } = event.data; let { temperature = 0.0, seed = 299792458, repeat_penalty = 1.1, repeat_last_n = 64, top_p = 1, } = { ...params }; try { self.postMessage({ status: "ready", message: "Starting T5 Conditional Generation", }); const model = await ConditionalGeneration.getInstance( weightsURL, tokenizerURL, configURL, modelID ); self.postMessage({ status: "decoding", message: "Decoding Prompt", }); const output = model.decode({ prompt, temperature, seed, top_p, repeat_penalty, repeat_last_n, }); self.postMessage({ status: "complete", message: "complete", output: output, }); } catch (e) { self.postMessage({ error: e }); } });
candle/candle-wasm-examples/t5/T5ModelConditionalGeneration.js/0
{ "file_path": "candle/candle-wasm-examples/t5/T5ModelConditionalGeneration.js", "repo_id": "candle", "token_count": 980 }
pub const NAMES: [&str; 80] = [ "person", "bicycle", "car", "motorbike", "aeroplane", "bus", "train", "truck", "boat", "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard", "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "sofa", "pottedplant", "bed", "diningtable", "toilet", "tvmonitor", "laptop", "mouse", "remote", "keyboard", "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush", ];
candle/candle-wasm-examples/yolo/src/coco_classes.rs/0
{ "file_path": "candle/candle-wasm-examples/yolo/src/coco_classes.rs", "repo_id": "candle", "token_count": 648 }
{ "editor.formatOnSave": true, "editor.defaultFormatter": "esbenp.prettier-vscode", "editor.codeActionsOnSave": { "source.fixAll": "explicit" }, "eslint.validate": ["javascript", "svelte"], "[svelte]": { "editor.defaultFormatter": "esbenp.prettier-vscode" } }
chat-ui/.vscode/settings.json/0
{ "file_path": "chat-ui/.vscode/settings.json", "repo_id": "chat-ui", "token_count": 118 }
{{- if $.Values.monitoring.enabled }} apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor metadata: labels: {{ include "labels.standard" . | nindent 4 }} name: {{ include "name" . }} namespace: {{ .Release.Namespace }} spec: selector: matchLabels: {{ include "labels.standard" . | nindent 6 }} endpoints: - port: metrics path: /metrics interval: 15s {{- end }}
chat-ui/chart/templates/service-monitor.yaml/0
{ "file_path": "chat-ui/chart/templates/service-monitor.yaml", "repo_id": "chat-ui", "token_count": 144 }
# Ollama

| Feature                     | Available |
| --------------------------- | --------- |
| [Tools](../tools)           | No        |
| [Multimodal](../multimodal) | No        |

We also support the Ollama inference server. First, spin up a model locally:

```bash
ollama run mistral
```

Then add the model to your `MODELS` configuration. The `ollamaName` field must match the name of the model you started with `ollama run`:

```ini
MODELS=`[
  {
    "name": "Ollama Mistral",
    "chatPromptTemplate": "<s>{{#each messages}}{{#ifUser}}[INST] {{#if @first}}{{#if @root.preprompt}}{{@root.preprompt}}\n{{/if}}{{/if}} {{content}} [/INST]{{/ifUser}}{{#ifAssistant}}{{content}}</s> {{/ifAssistant}}{{/each}}",
    "parameters": {
      "temperature": 0.1,
      "top_p": 0.95,
      "repetition_penalty": 1.2,
      "top_k": 50,
      "truncate": 3072,
      "max_new_tokens": 1024,
      "stop": ["</s>"]
    },
    "endpoints": [
      {
        "type": "ollama",
        "url" : "http://127.0.0.1:11434",
        "ollamaName" : "mistral"
      }
    ]
  }
]`
```
chat-ui/docs/source/configuration/models/providers/ollama.md/0
{ "file_path": "chat-ui/docs/source/configuration/models/providers/ollama.md", "repo_id": "chat-ui", "token_count": 468 }
<script lang="ts"> import CopyToClipBoardBtn from "./CopyToClipBoardBtn.svelte"; import DOMPurify from "isomorphic-dompurify"; import hljs from "highlight.js"; interface Props { code?: string; lang?: string; } let { code = "", lang = "" }: Props = $props(); let highlightedCode = $derived(hljs.highlightAuto(code, hljs.getLanguage(lang)?.aliases).value); </script> <div class="group relative my-4 rounded-lg"> <pre class="scrollbar-custom overflow-auto px-5 scrollbar-thumb-gray-500 hover:scrollbar-thumb-gray-400 dark:scrollbar-thumb-white/10 dark:hover:scrollbar-thumb-white/20"><code ><!-- eslint-disable svelte/no-at-html-tags -->{@html DOMPurify.sanitize( highlightedCode )}</code ></pre> <CopyToClipBoardBtn classNames="btn rounded-lg border border-gray-200 px-2 py-2 text-sm shadow-sm transition-all hover:border-gray-300 active:shadow-inner dark:border-gray-700 dark:hover:border-gray-500 absolute top-2 right-2 invisible opacity-0 group-hover:visible group-hover:opacity-100 dark:text-gray-700 text-gray-200" value={code} /> </div>
chat-ui/src/lib/components/CodeBlock.svelte/0
{ "file_path": "chat-ui/src/lib/components/CodeBlock.svelte", "repo_id": "chat-ui", "token_count": 401 }
<script lang="ts"> import { onMount, onDestroy } from "svelte"; interface Props { children?: import("svelte").Snippet; } let { children }: Props = $props(); let el: HTMLElement | undefined = $state(); onMount(() => { el?.ownerDocument.body.appendChild(el); }); onDestroy(() => { if (el?.parentNode) { el.parentNode.removeChild(el); } }); </script> <div bind:this={el} class="contents" hidden> {@render children?.()} </div>
chat-ui/src/lib/components/Portal.svelte/0
{ "file_path": "chat-ui/src/lib/components/Portal.svelte", "repo_id": "chat-ui", "token_count": 179 }
<script lang="ts"> import { createEventDispatcher } from "svelte"; import { base } from "$app/paths"; import { goto } from "$app/navigation"; import type { Model } from "$lib/types/Model"; import type { Assistant } from "$lib/types/Assistant"; import { useSettingsStore } from "$lib/stores/settings"; import { formatUserCount } from "$lib/utils/formatUserCount"; import IconGear from "~icons/bi/gear-fill"; import IconInternet from "../icons/IconInternet.svelte"; import CarbonExport from "~icons/carbon/export"; import CarbonCheckmark from "~icons/carbon/checkmark"; import CarbonRenew from "~icons/carbon/renew"; import CarbonUserMultiple from "~icons/carbon/user-multiple"; import CarbonTools from "~icons/carbon/tools"; import { share } from "$lib/utils/share"; import { env as envPublic } from "$env/dynamic/public"; import { page } from "$app/state"; interface Props { models: Model[]; assistant: Pick< Assistant, | "avatar" | "name" | "rag" | "dynamicPrompt" | "modelId" | "createdByName" | "exampleInputs" | "_id" | "description" | "userCount" | "tools" >; } let { models, assistant }: Props = $props(); const dispatch = createEventDispatcher<{ message: string }>(); let hasRag = $derived( assistant?.rag?.allowAllDomains || (assistant?.rag?.allowedDomains?.length ?? 0) > 0 || (assistant?.rag?.allowedLinks?.length ?? 0) > 0 || assistant?.dynamicPrompt ); const prefix = envPublic.PUBLIC_SHARE_PREFIX || `${envPublic.PUBLIC_ORIGIN || page.url.origin}${base}`; let shareUrl = $derived(`${prefix}/assistant/${assistant?._id}`); let isCopied = $state(false); const settings = useSettingsStore(); </script> <div class="flex h-full w-full flex-col content-center items-center justify-center pb-52"> <div class="relative mt-auto rounded-2xl bg-gray-100 text-gray-600 dark:border-gray-800 dark:bg-gray-800/60 dark:text-gray-300" > <div class="mt-3 flex min-w-[80dvw] items-center gap-4 p-4 pr-1 sm:min-w-[440px] md:p-8 xl:gap-8" > {#if assistant.avatar} <img src={`${base}/settings/assistants/${assistant._id.toString()}/avatar.jpg?hash=${ assistant.avatar }`} alt="avatar" class="size-16 flex-none rounded-full object-cover max-sm:self-start md:size-32" /> {:else} <div class="flex size-12 flex-none items-center justify-center rounded-full bg-gray-300 object-cover text-xl font-bold uppercase text-gray-500 dark:bg-gray-600 max-sm:self-start sm:text-4xl md:size-32" > {assistant?.name[0]} </div> {/if} <div class="flex h-full flex-col gap-2 text-balance"> <p class="-mb-1">Assistant</p> <p class="text-xl font-bold sm:text-2xl">{assistant.name}</p> {#if assistant.description} <p class="line-clamp-6 text-sm text-gray-500 dark:text-gray-400"> {assistant.description} </p> {/if} {#if assistant?.tools?.length} <div class="flex h-5 w-fit items-center gap-1 rounded-full bg-purple-500/10 pl-1 pr-2 text-xs" title="This assistant uses the websearch." > <CarbonTools class="text-sm text-purple-600" /> Has tools </div> {/if} {#if hasRag} <div class="flex h-5 w-fit items-center gap-1 rounded-full bg-blue-500/10 pl-1 pr-2 text-xs" title="This assistant uses the websearch." 
> <IconInternet classNames="text-sm text-blue-600" /> Has internet access </div> {/if} {#if assistant.createdByName} <div class="pt-1 text-sm text-gray-400 dark:text-gray-500"> Created by <a class="hover:underline" href="{base}/assistants?user={assistant.createdByName}"> {assistant.createdByName} </a> {#if assistant.userCount && assistant.userCount > 1} <span class="mx-1">Β·</span> <div class="inline-flex items-baseline gap-1 text-sm text-gray-400 dark:text-gray-500" title="Number of users" > <CarbonUserMultiple class="text-xxs" />{formatUserCount(assistant.userCount)} users </div> {/if} </div> {/if} </div> </div> <div class="absolute right-3 top-3 md:right-4 md:top-4"> <div class="flex flex-row items-center gap-1"> <button class="flex h-7 items-center gap-1.5 rounded-full border bg-white px-2.5 py-1 text-gray-800 shadow-sm hover:shadow-inner dark:border-gray-700 dark:bg-gray-700 dark:text-gray-300/90 dark:hover:bg-gray-800 max-sm:px-1.5 md:text-sm" onclick={() => { if (!isCopied) { share(shareUrl, assistant.name); isCopied = true; setTimeout(() => { isCopied = false; }, 2000); } }} > {#if isCopied} <CarbonCheckmark class="text-xxs text-green-600 max-sm:text-xs" /> <span class="text-green-600 max-sm:hidden"> Link copied </span> {:else} <CarbonExport class="text-xxs max-sm:text-xs" /> <span class="max-sm:hidden"> Share </span> {/if} </button> <a href="{base}/settings/assistants/{assistant._id.toString()}" class="flex h-7 items-center gap-1.5 rounded-full border bg-white px-2.5 py-1 text-gray-800 shadow-sm hover:shadow-inner dark:border-gray-700 dark:bg-gray-700 dark:text-gray-300/90 dark:hover:bg-gray-800 md:text-sm" ><IconGear class="text-xxs" />Settings</a > </div> </div> <button onclick={() => { settings.instantSet({ activeModel: models[0].name, }); goto(`${base}/`); }} class="absolute -bottom-6 right-2 inline-flex items-center justify-center text-xs text-gray-600 underline hover:brightness-50 dark:text-gray-400 dark:hover:brightness-110" > <CarbonRenew class="mr-1.5 text-xxs" /> Reset to default model </button> </div> {#if assistant.exampleInputs} <div class="mx-auto mt-auto w-full gap-8 sm:-mb-8"> <div class="md:col-span-2 md:mt-6"> <div class="grid grid-cols-1 gap-3 {assistant.exampleInputs.length > 1 ? 'md:grid-cols-2' : ''}" > {#each assistant.exampleInputs as example} <button type="button" class="truncate whitespace-nowrap rounded-xl border bg-gray-50 px-3 py-2 text-left text-smd text-gray-600 hover:bg-gray-100 dark:border-gray-800 dark:bg-gray-800 dark:text-gray-300 dark:hover:bg-gray-700" onclick={() => dispatch("message", example)} > {example} </button> {/each} </div> </div> </div> {/if} </div>
chat-ui/src/lib/components/chat/AssistantIntroduction.svelte/0
{ "file_path": "chat-ui/src/lib/components/chat/AssistantIntroduction.svelte", "repo_id": "chat-ui", "token_count": 2837 }
import type { Migration } from "."; import { collections } from "$lib/server/database"; import { ObjectId, type AnyBulkWriteOperation } from "mongodb"; import type { Assistant } from "$lib/types/Assistant"; import { generateSearchTokens } from "$lib/utils/searchTokens"; const migration: Migration = { _id: new ObjectId("5f9f3e3e3e3e3e3e3e3e3e3e"), name: "Update search assistants", up: async () => { const { assistants } = collections; let ops: AnyBulkWriteOperation<Assistant>[] = []; for await (const assistant of assistants .find() .project<Pick<Assistant, "_id" | "name">>({ _id: 1, name: 1 })) { ops.push({ updateOne: { filter: { _id: assistant._id, }, update: { $set: { searchTokens: generateSearchTokens(assistant.name), }, }, }, }); if (ops.length >= 1000) { process.stdout.write("."); await assistants.bulkWrite(ops, { ordered: false }); ops = []; } } if (ops.length) { await assistants.bulkWrite(ops, { ordered: false }); } return true; }, down: async () => { const { assistants } = collections; await assistants.updateMany({}, { $unset: { searchTokens: "" } }); return true; }, }; export default migration;
chat-ui/src/lib/migrations/routines/01-update-search-assistants.ts/0
{ "file_path": "chat-ui/src/lib/migrations/routines/01-update-search-assistants.ts", "repo_id": "chat-ui", "token_count": 483 }
import { z } from "zod"; import type { EmbeddingEndpoint, Embedding } from "../embeddingEndpoints"; import { chunk } from "$lib/utils/chunk"; import { env } from "$env/dynamic/private"; export const embeddingEndpointOpenAIParametersSchema = z.object({ weight: z.number().int().positive().default(1), model: z.any(), type: z.literal("openai"), url: z.string().url().default("https://api.openai.com/v1/embeddings"), apiKey: z.string().default(env.OPENAI_API_KEY), defaultHeaders: z.record(z.string()).default({}), }); export async function embeddingEndpointOpenAI( input: z.input<typeof embeddingEndpointOpenAIParametersSchema> ): Promise<EmbeddingEndpoint> { const { url, model, apiKey, defaultHeaders } = embeddingEndpointOpenAIParametersSchema.parse(input); const maxBatchSize = model.maxBatchSize || 100; return async ({ inputs }) => { const requestURL = new URL(url); const batchesInputs = chunk(inputs, maxBatchSize); const batchesResults = await Promise.all( batchesInputs.map(async (batchInputs) => { const response = await fetch(requestURL, { method: "POST", headers: { Accept: "application/json", "Content-Type": "application/json", ...(apiKey ? { Authorization: `Bearer ${apiKey}` } : {}), ...defaultHeaders, }, body: JSON.stringify({ input: batchInputs, model: model.name }), }); const embeddings: Embedding[] = []; const responseObject = await response.json(); for (const embeddingObject of responseObject.data) { embeddings.push(embeddingObject.embedding); } return embeddings; }) ); const flatAllEmbeddings = batchesResults.flat(); return flatAllEmbeddings; }; }
chat-ui/src/lib/server/embeddingEndpoints/openai/embeddingEndpoints.ts/0
{ "file_path": "chat-ui/src/lib/server/embeddingEndpoints/openai/embeddingEndpoints.ts", "repo_id": "chat-ui", "token_count": 620 }
import { buildPrompt } from "$lib/buildPrompt"; import { z } from "zod"; import type { Endpoint } from "../endpoints"; import type { TextGenerationStreamOutput } from "@huggingface/inference"; import { logger } from "$lib/server/logger"; export const endpointLangserveParametersSchema = z.object({ weight: z.number().int().positive().default(1), model: z.any(), type: z.literal("langserve"), url: z.string().url(), }); export function endpointLangserve( input: z.input<typeof endpointLangserveParametersSchema> ): Endpoint { const { url, model } = endpointLangserveParametersSchema.parse(input); return async ({ messages, preprompt, continueMessage }) => { const prompt = await buildPrompt({ messages, continueMessage, preprompt, model, }); const r = await fetch(`${url}/stream`, { method: "POST", headers: { "Content-Type": "application/json", }, body: JSON.stringify({ input: { text: prompt }, }), }); if (!r.ok) { throw new Error(`Failed to generate text: ${await r.text()}`); } const encoder = new TextDecoderStream(); const reader = r.body?.pipeThrough(encoder).getReader(); return (async function* () { let stop = false; let generatedText = ""; let tokenId = 0; let accumulatedData = ""; // Buffer to accumulate data chunks while (!stop) { // Read the stream and log the outputs to console const out = (await reader?.read()) ?? { done: false, value: undefined }; // If it's done, we cancel if (out.done) { reader?.cancel(); return; } if (!out.value) { return; } // Accumulate the data chunk accumulatedData += out.value; // Keep read data to check event type const eventData = out.value; // Process each complete JSON object in the accumulated data while (accumulatedData.includes("\n")) { // Assuming each JSON object ends with a newline const endIndex = accumulatedData.indexOf("\n"); let jsonString = accumulatedData.substring(0, endIndex).trim(); // Remove the processed part from the buffer accumulatedData = accumulatedData.substring(endIndex + 1); // Stopping with end event if (eventData.startsWith("event: end")) { stop = true; yield { token: { id: tokenId++, text: "", logprob: 0, special: true, }, generated_text: generatedText, details: null, } satisfies TextGenerationStreamOutput; reader?.cancel(); continue; } if (eventData.startsWith("event: data") && jsonString.startsWith("data: ")) { jsonString = jsonString.slice(6); let data = null; // Handle the parsed data try { data = JSON.parse(jsonString); } catch (e) { logger.error(e, "Failed to parse JSON"); logger.error(jsonString, "Problematic JSON string:"); continue; // Skip this iteration and try the next chunk } // Assuming content within data is a plain string if (data) { generatedText += data; const output: TextGenerationStreamOutput = { token: { id: tokenId++, text: data, logprob: 0, special: false, }, generated_text: null, details: null, }; yield output; } } } } })(); }; } export default endpointLangserve;
chat-ui/src/lib/server/endpoints/langserve/endpointLangserve.ts/0
{ "file_path": "chat-ui/src/lib/server/endpoints/langserve/endpointLangserve.ts", "repo_id": "chat-ui", "token_count": 1394 }
import { env } from "$env/dynamic/private"; import { generateFromDefaultEndpoint } from "$lib/server/generateFromDefaultEndpoint"; import type { EndpointMessage } from "../endpoints/endpoints"; import { logger } from "$lib/server/logger"; import { MessageUpdateType, type MessageUpdate } from "$lib/types/MessageUpdate"; import type { Conversation } from "$lib/types/Conversation"; import { getReturnFromGenerator } from "$lib/utils/getReturnFromGenerator"; export async function* generateTitleForConversation( conv: Conversation ): AsyncGenerator<MessageUpdate, undefined, undefined> { try { const userMessage = conv.messages.find((m) => m.from === "user"); // HACK: detect if the conversation is new if (conv.title !== "New Chat" || !userMessage) return; const prompt = userMessage.content; const title = (await generateTitle(prompt)) ?? "New Chat"; yield { type: MessageUpdateType.Title, title, }; } catch (cause) { logger.error(Error("Failed whilte generating title for conversation", { cause })); } } export async function generateTitle(prompt: string) { if (env.LLM_SUMMARIZATION !== "true") { return prompt.split(/\s+/g).slice(0, 5).join(" "); } const messages: Array<EndpointMessage> = [ { from: "system", content: "You are a summarization AI. You'll never answer a user's question directly, but instead summarize the user's request into a single short sentence of four words or less. Always start your answer with an emoji relevant to the summary", }, { from: "user", content: "Who is the president of Gabon?" }, { from: "assistant", content: "πŸ‡¬πŸ‡¦ President of Gabon" }, { from: "user", content: "Who is Julien Chaumond?" }, { from: "assistant", content: "πŸ§‘ Julien Chaumond" }, { from: "user", content: "what is 1 + 1?" }, { from: "assistant", content: "πŸ”’ Simple math operation" }, { from: "user", content: "What are the latest news?" }, { from: "assistant", content: "πŸ“° Latest news" }, { from: "user", content: "How to make a great cheesecake?" }, { from: "assistant", content: "🍰 Cheesecake recipe" }, { from: "user", content: "what is your favorite movie? do a short answer." }, { from: "assistant", content: "πŸŽ₯ Favorite movie" }, { from: "user", content: "Explain the concept of artificial intelligence in one sentence" }, { from: "assistant", content: "πŸ€– AI definition" }, { from: "user", content: "Draw a cute cat" }, { from: "assistant", content: "🐱 Cute cat drawing" }, { from: "user", content: prompt }, ]; return await getReturnFromGenerator( generateFromDefaultEndpoint({ messages, preprompt: "You are a summarization AI. Summarize the user's request into a single short sentence of four words or less. Do not try to answer it, only summarize the user's query. Always start your answer with an emoji relevant to the summary", generateSettings: { max_new_tokens: 15, }, }) ) .then((summary) => { // add an emoji if none is found in the first three characters if (!/\p{Emoji}/u.test(summary.slice(0, 3))) { return "πŸ’¬ " + summary; } return summary; }) .catch((e) => { logger.error(e); return null; }); }
chat-ui/src/lib/server/textGeneration/title.ts/0
{ "file_path": "chat-ui/src/lib/server/textGeneration/title.ts", "repo_id": "chat-ui", "token_count": 1069 }
/* eslint-disable-next-line no-shadow */ export enum MarkdownElementType { Header = "HEADER", Paragraph = "PARAGRAPH", BlockQuote = "BLOCKQUOTE", CodeBlock = "CODE_BLOCK", UnorderedList = "UNORDERED_LIST", OrderedList = "ORDERED_LIST", UnorderedListItem = "UNORDERED_LIST_ITEM", OrderedListItem = "ORDERED_LIST_ITEM", } interface BaseMarkdownElement<T = MarkdownElementType> { type: T; content: string; parent: HeaderElement | null; } export interface HeaderElement extends BaseMarkdownElement<MarkdownElementType.Header> { level: number; children: MarkdownElement[]; } type ListItem = MarkdownElementType.UnorderedListItem | MarkdownElementType.OrderedListItem; interface ListItemElement extends BaseMarkdownElement<ListItem> { depth: number; } interface BlockQuoteElement extends BaseMarkdownElement<MarkdownElementType.BlockQuote> { depth: number; } interface ParagraphElement extends BaseMarkdownElement<MarkdownElementType.Paragraph> {} interface CodeBlockElement extends BaseMarkdownElement<MarkdownElementType.CodeBlock> {} export type MarkdownElement = | HeaderElement | ParagraphElement | BlockQuoteElement | CodeBlockElement | ListItemElement; export const tagNameMap: Record<string, MarkdownElementType> = { h1: MarkdownElementType.Header, h2: MarkdownElementType.Header, h3: MarkdownElementType.Header, h4: MarkdownElementType.Header, h5: MarkdownElementType.Header, h6: MarkdownElementType.Header, div: MarkdownElementType.Paragraph, p: MarkdownElementType.Paragraph, blockquote: MarkdownElementType.BlockQuote, pre: MarkdownElementType.CodeBlock, ul: MarkdownElementType.UnorderedList, ol: MarkdownElementType.OrderedList, li: MarkdownElementType.UnorderedListItem, };
chat-ui/src/lib/server/websearch/markdown/types.ts/0
{ "file_path": "chat-ui/src/lib/server/websearch/markdown/types.ts", "repo_id": "chat-ui", "token_count": 541 }
import { JSDOM, VirtualConsole } from "jsdom";
import { isURL } from "$lib/utils/isUrl";
import type { WebSearchSource } from "$lib/types/WebSearch";

export default async function searchWebLocal(query: string): Promise<WebSearchSource[]> {
	const abortController = new AbortController();
	setTimeout(() => abortController.abort(), 10000);

	const htmlString = await fetch(
		"https://www.google.com/search?hl=en&q=" + encodeURIComponent(query),
		{ signal: abortController.signal }
	)
		.then((response) => response.text())
		.catch(() => undefined); // swallow fetch/abort errors; the `?? ""` fallback below handles them

	const virtualConsole = new VirtualConsole();
	virtualConsole.on("error", () => {}); // No-op to skip console errors.
	const document = new JSDOM(htmlString ?? "", { virtualConsole }).window.document;

	// get all links
	const links = document.querySelectorAll("a");
	if (!links.length) throw new Error(`webpage doesn't have any "a" element`);

	// take urls that start with /url?q=
	// and do not contain google.com links
	// and strip them up to '&sa='
	const linksHref = Array.from(links)
		.map((el) => el.href)
		.filter((link) => link.startsWith("/url?q=") && !link.includes("google.com/"))
		.map((link) => link.slice("/url?q=".length, link.indexOf("&sa=")))
		.filter(isURL);

	// remove duplicate links and map links to the correct object shape
	return [...new Set(linksHref)].map((link) => ({ link }));
}
chat-ui/src/lib/server/websearch/search/endpoints/webLocal.ts/0
{ "file_path": "chat-ui/src/lib/server/websearch/search/endpoints/webLocal.ts", "repo_id": "chat-ui", "token_count": 439 }
import type { Timestamps } from "./Timestamps";
import type { Assistant } from "./Assistant";

export interface AssistantStats extends Timestamps {
	assistantId: Assistant["_id"];
	date: {
		at: Date;
		span: "hour";
	};
	count: number;
}
chat-ui/src/lib/types/AssistantStats.ts/0
{ "file_path": "chat-ui/src/lib/types/AssistantStats.ts", "repo_id": "chat-ui", "token_count": 80 }
import type { Model } from "$lib/types/Model";
import { AutoTokenizer, PreTrainedTokenizer } from "@huggingface/transformers";

export async function getTokenizer(_modelTokenizer: Exclude<Model["tokenizer"], undefined>) {
	if (typeof _modelTokenizer === "string") {
		// return auto tokenizer
		return await AutoTokenizer.from_pretrained(_modelTokenizer);
	} else {
		// construct & return pretrained tokenizer
		const { tokenizerUrl, tokenizerConfigUrl } = _modelTokenizer satisfies {
			tokenizerUrl: string;
			tokenizerConfigUrl: string;
		};
		const tokenizerJSON = await (await fetch(tokenizerUrl)).json();
		const tokenizerConfig = await (await fetch(tokenizerConfigUrl)).json();
		return new PreTrainedTokenizer(tokenizerJSON, tokenizerConfig);
	}
}
chat-ui/src/lib/utils/getTokenizer.ts/0
{ "file_path": "chat-ui/src/lib/utils/getTokenizer.ts", "repo_id": "chat-ui", "token_count": 229 }
import type { Message } from "$lib/types/Message";
import Handlebars from "handlebars";

Handlebars.registerHelper("ifUser", function (this: Pick<Message, "from" | "content">, options) {
	if (this.from == "user") return options.fn(this);
});

Handlebars.registerHelper(
	"ifAssistant",
	function (this: Pick<Message, "from" | "content">, options) {
		if (this.from == "assistant") return options.fn(this);
	}
);

export function compileTemplate<T>(input: string, model: { preprompt: string }) {
	const template = Handlebars.compile<T>(input, {
		knownHelpers: { ifUser: true, ifAssistant: true },
		knownHelpersOnly: true,
		noEscape: true,
		strict: true,
		preventIndent: true,
	});

	return function render(inputs: T, options?: RuntimeOptions) {
		return template({ ...model, ...inputs }, options);
	};
}
chat-ui/src/lib/utils/template.ts/0
{ "file_path": "chat-ui/src/lib/utils/template.ts", "repo_id": "chat-ui", "token_count": 266 }
import type { LayoutServerLoad } from "./$types"; import { collections } from "$lib/server/database"; import type { Conversation } from "$lib/types/Conversation"; import { UrlDependency } from "$lib/types/UrlDependency"; import { defaultModel, models, oldModels, validateModel } from "$lib/server/models"; import { authCondition, requiresUser } from "$lib/server/auth"; import { DEFAULT_SETTINGS } from "$lib/types/Settings"; import { env } from "$env/dynamic/private"; import { ObjectId } from "mongodb"; import type { ConvSidebar } from "$lib/types/ConvSidebar"; import { toolFromConfigs } from "$lib/server/tools"; import { MetricsServer } from "$lib/server/metrics"; import type { ToolFront, ToolInputFile } from "$lib/types/Tool"; import { ReviewStatus } from "$lib/types/Review"; import { base } from "$app/paths"; export const load: LayoutServerLoad = async ({ locals, depends, fetch }) => { depends(UrlDependency.ConversationList); const settings = await collections.settings.findOne(authCondition(locals)); // If the active model in settings is not valid, set it to the default model. This can happen if model was disabled. if ( settings && !validateModel(models).safeParse(settings?.activeModel).success && !settings.assistants?.map((el) => el.toString())?.includes(settings?.activeModel) ) { settings.activeModel = defaultModel.id; await collections.settings.updateOne(authCondition(locals), { $set: { activeModel: defaultModel.id }, }); } // if the model is unlisted, set the active model to the default model if ( settings?.activeModel && models.find((m) => m.id === settings?.activeModel)?.unlisted === true ) { settings.activeModel = defaultModel.id; await collections.settings.updateOne(authCondition(locals), { $set: { activeModel: defaultModel.id }, }); } const enableAssistants = env.ENABLE_ASSISTANTS === "true"; const assistantActive = !models.map(({ id }) => id).includes(settings?.activeModel ?? ""); const assistant = assistantActive ? await collections.assistants.findOne({ _id: new ObjectId(settings?.activeModel), }) : null; const nConversations = await collections.conversations.countDocuments(authCondition(locals)); const conversations = nConversations === 0 ? Promise.resolve([]) : fetch(`${base}/api/conversations`) .then((res) => res.json()) .then( ( convs: Pick<Conversation, "_id" | "title" | "updatedAt" | "model" | "assistantId">[] ) => convs.map((conv) => ({ ...conv, updatedAt: new Date(conv.updatedAt), })) ); const userAssistants = settings?.assistants?.map((assistantId) => assistantId.toString()) ?? []; const userAssistantsSet = new Set(userAssistants); const assistants = conversations.then((conversations) => collections.assistants .find({ _id: { $in: [ ...userAssistants.map((el) => new ObjectId(el)), ...(conversations.map((conv) => conv.assistantId).filter((el) => !!el) as ObjectId[]), ], }, }) .toArray() ); const messagesBeforeLogin = env.MESSAGES_BEFORE_LOGIN ? parseInt(env.MESSAGES_BEFORE_LOGIN) : 0; let loginRequired = false; if (requiresUser && !locals.user) { if (messagesBeforeLogin === 0) { loginRequired = true; } else if (nConversations >= messagesBeforeLogin) { loginRequired = true; } else { // get the number of messages where `from === "assistant"` across all conversations. 
const totalMessages = ( await collections.conversations .aggregate([ { $match: { ...authCondition(locals), "messages.from": "assistant" } }, { $project: { messages: 1 } }, { $limit: messagesBeforeLogin + 1 }, { $unwind: "$messages" }, { $match: { "messages.from": "assistant" } }, { $count: "messages" }, ]) .toArray() )[0]?.messages ?? 0; loginRequired = totalMessages >= messagesBeforeLogin; } } const toolUseDuration = (await MetricsServer.getMetrics().tool.toolUseDuration.get()).values; const configToolIds = toolFromConfigs.map((el) => el._id.toString()); let activeCommunityToolIds = (settings?.tools ?? []).filter( (key) => !configToolIds.includes(key) ); if (assistant) { activeCommunityToolIds = [...activeCommunityToolIds, ...(assistant.tools ?? [])]; } const communityTools = await collections.tools .find({ _id: { $in: activeCommunityToolIds.map((el) => new ObjectId(el)) } }) .toArray() .then((tools) => tools.map((tool) => ({ ...tool, isHidden: false, isOnByDefault: true, isLocked: true, })) ); return { nConversations, conversations: await conversations.then( async (convs) => await Promise.all( convs.map(async (conv) => { if (settings?.hideEmojiOnSidebar) { conv.title = conv.title.replace(/\p{Emoji}/gu, ""); } // remove invalid unicode and trim whitespaces conv.title = conv.title.replace(/\uFFFD/gu, "").trimStart(); let avatarUrl: string | undefined = undefined; if (conv.assistantId) { const hash = ( await collections.assistants.findOne({ _id: new ObjectId(conv.assistantId), }) )?.avatar; if (hash) { avatarUrl = `/settings/assistants/${conv.assistantId}/avatar.jpg?hash=${hash}`; } } return { id: conv._id.toString(), title: conv.title, model: conv.model ?? defaultModel, updatedAt: conv.updatedAt, assistantId: conv.assistantId?.toString(), avatarUrl, } satisfies ConvSidebar; }) ) ), settings: { searchEnabled: !!( env.SERPAPI_KEY || env.SERPER_API_KEY || env.SERPSTACK_API_KEY || env.SEARCHAPI_KEY || env.YDC_API_KEY || env.USE_LOCAL_WEBSEARCH || env.SEARXNG_QUERY_URL || env.BING_SUBSCRIPTION_KEY ), ethicsModalAccepted: !!settings?.ethicsModalAcceptedAt, ethicsModalAcceptedAt: settings?.ethicsModalAcceptedAt ?? null, activeModel: settings?.activeModel ?? DEFAULT_SETTINGS.activeModel, hideEmojiOnSidebar: settings?.hideEmojiOnSidebar ?? false, shareConversationsWithModelAuthors: settings?.shareConversationsWithModelAuthors ?? DEFAULT_SETTINGS.shareConversationsWithModelAuthors, customPrompts: settings?.customPrompts ?? {}, assistants: userAssistants, tools: settings?.tools ?? toolFromConfigs .filter((el) => !el.isHidden && el.isOnByDefault) .map((el) => el._id.toString()), disableStream: settings?.disableStream ?? DEFAULT_SETTINGS.disableStream, directPaste: settings?.directPaste ?? 
DEFAULT_SETTINGS.directPaste, }, models: models.map((model) => ({ id: model.id, name: model.name, websiteUrl: model.websiteUrl, modelUrl: model.modelUrl, tokenizer: model.tokenizer, datasetName: model.datasetName, datasetUrl: model.datasetUrl, displayName: model.displayName, description: model.description, reasoning: !!model.reasoning, logoUrl: model.logoUrl, promptExamples: model.promptExamples, parameters: model.parameters, preprompt: model.preprompt, multimodal: model.multimodal, multimodalAcceptedMimetypes: model.multimodalAcceptedMimetypes, tools: model.tools, unlisted: model.unlisted, hasInferenceAPI: model.hasInferenceAPI, })), oldModels, tools: [...toolFromConfigs, ...communityTools] .filter((tool) => !tool?.isHidden) .map( (tool) => ({ _id: tool._id.toString(), type: tool.type, displayName: tool.displayName, name: tool.name, description: tool.description, mimeTypes: (tool.inputs ?? []) .filter((input): input is ToolInputFile => input.type === "file") .map((input) => (input as ToolInputFile).mimeTypes) .flat(), isOnByDefault: tool.isOnByDefault ?? true, isLocked: tool.isLocked ?? true, timeToUseMS: toolUseDuration.find( (el) => el.labels.tool === tool._id.toString() && el.labels.quantile === 0.9 )?.value ?? 15_000, color: tool.color, icon: tool.icon, }) satisfies ToolFront ), communityToolCount: await collections.tools.countDocuments({ type: "community", review: ReviewStatus.APPROVED, }), assistants: assistants.then((assistants) => assistants .filter((el) => userAssistantsSet.has(el._id.toString())) .map((el) => ({ ...el, _id: el._id.toString(), createdById: undefined, createdByMe: el.createdById.toString() === (locals.user?._id ?? locals.sessionId).toString(), })) ), user: locals.user && { id: locals.user._id.toString(), username: locals.user.username, avatarUrl: locals.user.avatarUrl, email: locals.user.email, logoutDisabled: locals.user.logoutDisabled, isAdmin: locals.user.isAdmin ?? false, isEarlyAccess: locals.user.isEarlyAccess ?? false, }, assistant: assistant ? JSON.parse(JSON.stringify(assistant)) : null, enableAssistants, enableAssistantsRAG: env.ENABLE_ASSISTANTS_RAG === "true", enableCommunityTools: env.COMMUNITY_TOOLS === "true", loginRequired, loginEnabled: requiresUser, guestMode: requiresUser && messagesBeforeLogin > 0, }; };
chat-ui/src/routes/+layout.server.ts/0
{ "file_path": "chat-ui/src/routes/+layout.server.ts", "repo_id": "chat-ui", "token_count": 3689 }
<script lang="ts"> import { page } from "$app/state"; import { base } from "$app/paths"; import { goto } from "$app/navigation"; import { onMount } from "svelte"; import { env as envPublic } from "$env/dynamic/public"; import ChatWindow from "$lib/components/chat/ChatWindow.svelte"; import { findCurrentModel } from "$lib/utils/models"; import { useSettingsStore } from "$lib/stores/settings"; import { ERROR_MESSAGES, error } from "$lib/stores/errors"; import { pendingMessage } from "$lib/stores/pendingMessage"; let { data } = $props(); let loading = $state(false); let files: File[] = $state([]); const settings = useSettingsStore(); const modelId = page.params.model; async function createConversation(message: string) { try { loading = true; const res = await fetch(`${base}/conversation`, { method: "POST", headers: { "Content-Type": "application/json", }, body: JSON.stringify({ model: data.assistant.modelId, assistantId: data.assistant._id, }), }); if (!res.ok) { error.set("Error while creating conversation, try again."); console.error("Error while creating conversation: " + (await res.text())); return; } const { conversationId } = await res.json(); // Ugly hack to use a store as temp storage, feel free to improve ^^ pendingMessage.set({ content: message, files, }); // invalidateAll to update list of conversations await goto(`${base}/conversation/${conversationId}`, { invalidateAll: true }); } catch (err) { error.set(ERROR_MESSAGES.default); console.error(err); } finally { loading = false; } } onMount(async () => { settings.instantSet({ activeModel: modelId, }); const query = page.url.searchParams.get("q"); if (query) createConversation(query); }); </script> <svelte:head> <meta property="og:title" content={data.assistant.name + " - " + envPublic.PUBLIC_APP_NAME} /> <meta property="og:type" content="link" /> <meta property="og:description" content={`Use the ${data.assistant.name} assistant inside of ${envPublic.PUBLIC_APP_NAME}`} /> <meta property="og:image" content="{envPublic.PUBLIC_ORIGIN || page.url.origin}{base}/assistant/{data.assistant ._id}/thumbnail.png" /> <meta property="og:url" content={page.url.href} /> <meta name="twitter:card" content="summary_large_image" /> </svelte:head> <ChatWindow on:message={(ev) => createConversation(ev.detail)} {loading} currentModel={findCurrentModel([...data.models, ...data.oldModels], data.assistant.modelId)} assistant={data.assistant} models={data.models} bind:files />
chat-ui/src/routes/assistant/[assistantId]/+page.svelte/0
{ "file_path": "chat-ui/src/routes/assistant/[assistantId]/+page.svelte", "repo_id": "chat-ui", "token_count": 970 }
import { redirect } from "@sveltejs/kit";
import { getOIDCAuthorizationUrl } from "$lib/server/auth";
import { base } from "$app/paths";
import { env } from "$env/dynamic/private";

export const actions = {
	async default({ url, locals, request }) {
		const referer = request.headers.get("referer");
		let redirectURI = `${(referer ? new URL(referer) : url).origin}${base}/login/callback`;
		// TODO: Handle errors if provider is not responding
		if (url.searchParams.has("callback")) {
			const callback = url.searchParams.get("callback") || redirectURI;
			if (env.ALTERNATIVE_REDIRECT_URLS.includes(callback)) {
				redirectURI = callback;
			}
		}

		const authorizationUrl = await getOIDCAuthorizationUrl(
			{ redirectURI },
			{ sessionId: locals.sessionId }
		);

		redirect(303, authorizationUrl);
	},
};
chat-ui/src/routes/login/+page.server.ts/0
{ "file_path": "chat-ui/src/routes/login/+page.server.ts", "repo_id": "chat-ui", "token_count": 280 }
import { base } from "$app/paths"; import { redirect } from "@sveltejs/kit"; export async function load({ parent, params }) { const data = await parent(); const model = data.models.find((m: { id: string }) => m.id === params.model); if (!model || model.unlisted) { redirect(302, `${base}/settings`); } return data; }
chat-ui/src/routes/settings/(nav)/[...model]/+page.ts/0
{ "file_path": "chat-ui/src/routes/settings/(nav)/[...model]/+page.ts", "repo_id": "chat-ui", "token_count": 111 }
<script lang="ts"> import { ToolOutputComponents, type CommunityToolEditable, type ToolInput, } from "$lib/types/Tool"; import { createEventDispatcher, onMount } from "svelte"; import { browser } from "$app/environment"; import ToolLogo from "$lib/components/ToolLogo.svelte"; import { colors, icons } from "$lib/utils/tools"; import { applyAction, enhance } from "$app/forms"; import { getGradioApi } from "$lib/utils/getGradioApi"; import { useSettingsStore } from "$lib/stores/settings"; import { goto } from "$app/navigation"; import { base } from "$app/paths"; import ToolInputComponent from "./ToolInputComponent.svelte"; import CarbonInformation from "~icons/carbon/information"; type ActionData = { error?: boolean; errors?: { field: string | number; message: string; }[]; } | null; interface Props { tool?: CommunityToolEditable | undefined; readonly?: boolean; form: ActionData; } let { tool = undefined, readonly = false, form = $bindable() }: Props = $props(); function getError(field: string, returnForm: ActionData) { return returnForm?.errors?.find((error) => error.field === field)?.message ?? ""; } let APIloading = $state(false); let formLoading = $state(false); const dispatch = createEventDispatcher<{ close: void }>(); onMount(async () => { await updateConfig(); }); let spaceUrl = $state(tool?.baseUrl ?? ""); let editableTool: CommunityToolEditable = $state( tool ?? { displayName: "", description: "", // random color & icon for new tools color: colors[Math.floor(Math.random() * colors.length)], icon: icons[Math.floor(Math.random() * icons.length)], baseUrl: "", endpoint: "", name: "", inputs: [], outputComponent: null, outputComponentIdx: 0, showOutput: true, } ); $effect(() => { editableTool.baseUrl && (spaceUrl = editableTool.baseUrl); }); async function updateConfig() { if (!browser || !editableTool.baseUrl || !editableTool.endpoint) { return; } form = { error: false, errors: [] }; APIloading = true; const api = await getGradioApi(editableTool.baseUrl); const newInputs = api.named_endpoints[editableTool.endpoint].parameters.map((param, idx) => { if (tool?.inputs[idx]?.name === param.parameter_name) { // if the tool has the same name, we use the tool's type return { ...tool?.inputs[idx], } satisfies ToolInput; } const type = parseValidInputType(param.python_type.type); if (param.parameter_has_default && param.python_type.type !== "filepath") { // optional if it has a default return { name: param.parameter_name, description: param.description, paramType: "optional" as const, default: param.parameter_default, ...(type === "file" ? { mimeTypes: "*/*", type } : { type }), }; } else { // required if it doesn't have a default return { name: param.parameter_name, description: param.description, paramType: "required" as const, ...(type === "file" ? { mimeTypes: "*/*", type } : { type }), }; } }); editableTool.inputs = newInputs; // outout components const parsedOutputComponent = ToolOutputComponents.safeParse( api.named_endpoints[editableTool.endpoint].returns?.[0]?.component ?? null ); if (parsedOutputComponent.success) { editableTool.outputComponent = "0;" + parsedOutputComponent.data; } else { form = { error: true, errors: [ { field: "outputComponent", message: `Invalid output component. Type ${ api.named_endpoints[editableTool.endpoint].returns?.[0]?.component } is not yet supported. 
Feel free to report this issue so we can add support for it.`, }, ], }; editableTool.outputComponent = null; } APIloading = false; } async function onEndpointChange(e: Event) { const target = e.target as HTMLInputElement; editableTool.endpoint = target.value; editableTool.name = target.value.replace(/\//g, ""); await updateConfig(); } function parseValidInputType(type: string) { switch (type) { case "str": case "int": case "float": case "bool": return type; case "filepath": return "file" as const; default: return "str"; } } const settings = useSettingsStore(); let formSubmittable = $derived( editableTool.name && editableTool.baseUrl && editableTool.outputComponent ); </script> <form method="POST" class="relative flex h-full flex-col overflow-y-auto p-4 md:p-8" use:enhance={async ({ formData }) => { formLoading = true; formData.append("tool", JSON.stringify(editableTool)); return async ({ result }) => { if (result.type === "success" && result.data && typeof result.data.toolId === "string") { $settings.tools = [...($settings.tools ?? []), result.data.toolId]; await goto(`${base}/tools/${result.data.toolId}`).then(() => { formLoading = false; }); } else { await applyAction(result).then(() => { formLoading = false; }); } }; }} > {#if tool} <h2 class="text-xl font-semibold"> {readonly ? "View" : "Edit"} Tool: {tool.displayName} </h2> {#if !readonly} <p class="mb-6 text-sm text-gray-500"> Modifying an existing tool will propagate the changes to all users. </p> {/if} {:else} <h2 class="text-xl font-semibold">Create new tool</h2> <p class="mb-6 text-sm text-gray-500"> Create and share your own tools. All tools are <span class="rounded-full border px-2 py-0.5 leading-none">public</span > </p> {/if} <div class="grid h-full w-full flex-1 grid-cols-2 gap-6 text-sm max-sm:grid-cols-1"> <div class="col-span-1 flex flex-col gap-4"> <div class="flex flex-col gap-4"> <label> <div class="mb-1 font-semibold">Tool Display Name</div> <input type="text" name="displayName" disabled={readonly} class="w-full rounded-lg border-2 border-gray-200 bg-gray-100 p-2" placeholder="Image generator" bind:value={editableTool.displayName} /> <p class="text-xs text-red-500">{getError("displayName", form)}</p> </label> <div class="flex flex-row gap-4"> <div> {#key editableTool.color + editableTool.icon} <ToolLogo color={editableTool.color} icon={editableTool.icon} /> {/key} </div> <label class="flex-grow"> <div class="mb-1 font-semibold">Icon</div> <select name="icon" disabled={readonly} class="w-full rounded-lg border-2 border-gray-200 bg-gray-100 p-2" bind:value={editableTool.icon} > {#each icons as icon} <option value={icon}>{icon}</option> {/each} </select> <p class="text-xs text-red-500">{getError("icon", form)}</p> </label> <label class="flex-grow"> <div class="mb-1 font-semibold">Color scheme</div> <select name="color" disabled={readonly} class="w-full rounded-lg border-2 border-gray-200 bg-gray-100 p-2" bind:value={editableTool.color} > {#each colors as color} <option value={color}>{color}</option> {/each} </select> <p class="text-xs text-red-500">{getError("color", form)}</p> </label> </div> <label> <div class=" font-semibold">Tool Description</div> <p class="mb-1 text-sm text-gray-500"> This description will be passed to the model when picking tools. Describe what your tool does and when it is appropriate to use. </p> <textarea name="description" disabled={readonly} class="w-full rounded-lg border-2 border-gray-200 bg-gray-100 p-2" placeholder="This tool lets you generate images using SDXL." 
bind:value={editableTool.description} ></textarea> <p class="text-xs text-red-500">{getError("description", form)}</p> </label> <label> <div class="mb-1 font-semibold">Hugging Face Space URL</div> <p class="mb-1 text-sm text-gray-500"> Specify the Hugging Face Space where your tool is hosted. <a href="https://huggingface.co/spaces" target="_blank" class="underline">See trending spaces here</a >. </p> <input type="text" name="spaceUrl" disabled={readonly} class="w-full rounded-lg border-2 border-gray-200 bg-gray-100 p-2" placeholder="ByteDance/Hyper-SDXL-1Step-T2I" bind:value={editableTool.baseUrl} /> <p class="text-xs text-red-500">{getError("spaceUrl", form)}</p> </label> <p class="text-justify text-gray-800"> Tools allows models that support them to use external application directly via function calling. Tools must use Hugging Face Gradio Spaces as we detect the input and output types automatically from the <a class="underline" href="https://www.gradio.app/guides/sharing-your-app#api-page">Gradio API</a >. For GPU intensive tool consider using a ZeroGPU Space. </p> </div> </div> <div class="col-span-1 flex flex-col gap-4"> <div class="flex flex-col gap-2"> <h3 class="mb-1 font-semibold">Functions</h3> {#if editableTool.baseUrl} <p class="text-sm text-gray-500">Choose functions that can be called in your tool.</p> {:else} <p class="text-sm text-gray-500">Start by specifying a Hugging Face Space URL.</p> {/if} {#if editableTool.baseUrl} {#await getGradioApi(spaceUrl)} <p class="text-sm text-gray-500">Loading...</p> {:then api} <div class="flex flex-row flex-wrap gap-4"> {#each Object.keys(api["named_endpoints"] ?? {}) as name} <label class="rounded-lg bg-gray-200 p-2"> <input type="radio" disabled={readonly} oninput={onEndpointChange} bind:group={editableTool.endpoint} value={name} name="endpoint" /> <span class="font-mono text-gray-800" class:font-semibold={editableTool.endpoint === name}>{name}</span > </label> {/each} </div> {#if editableTool.endpoint && api["named_endpoints"][editableTool.endpoint] && !APIloading} {@const endpoint = api["named_endpoints"][editableTool.endpoint]} <div class="flex flex-col gap-2"> <div class="flex flex-col gap-2 rounded-lg border border-gray-200 p-2"> <div class="flex items-center gap-1 border-b border-gray-200 p-1 pb-2"> <span class="flex-grow font-mono text-smd font-semibold" >{editableTool.endpoint}</span > <label class="ml-auto"> <span class="group relative flex w-max items-center justify-center text-sm font-semibold text-gray-700" > AI Function Name <CarbonInformation class="m-1 align-middle text-xs text-purple-500" /> <div class="pointer-events-none absolute -top-16 right-0 w-max rounded-md bg-gray-100 p-2 opacity-0 transition-opacity group-hover:opacity-100 dark:bg-gray-800" > <p class="max-w-sm text-sm font-normal text-gray-800 dark:text-gray-200"> This is the function name that will be used when prompting the model. Make sure it describes your tool well, is short and unique. </p> </div> </span> <input class="h-fit rounded-lg border-2 border-gray-200 bg-gray-100 p-1" type="text" name="name" disabled={readonly} bind:value={editableTool.name} /> </label> </div> <div> <h3 class="text-lg font-semibold">Arguments</h3> <p class="mb-2 text-sm text-gray-500"> Choose parameters that can be passed to your tool. 
</p> </div> <p class="text-xs text-red-500"> {getError(`inputs`, form)} </p> {#each editableTool.inputs as input, inputIdx} {@const parameter = endpoint.parameters.find( (parameter) => parameter.parameter_name === input.name )} <div class="flex items-center gap-1"> <div class="inline w-full"> <span class="font-mono text-sm">{input.name}</span> <span class="inline-block max-w-lg truncate rounded-lg bg-orange-50 p-1 text-sm text-orange-800" >{parameter?.python_type.type}</span > {#if parameter?.description} <p class="text-sm text-gray-500"> {parameter.description} </p> {/if} <div class="flex w-fit justify-start gap-2"> <label class="ml-auto"> <input type="radio" name="{input.name}-parameter-type" value="required" disabled={readonly} bind:group={editableTool.inputs[inputIdx].paramType} /> <span class="text-sm text-gray-500">Required</span> </label> <label class="ml-auto"> <input type="radio" name="{input.name}-parameter-type" value="optional" disabled={readonly || parameter?.python_type.type === "filepath"} bind:group={editableTool.inputs[inputIdx].paramType} /> <span class="text-sm text-gray-500">Optional</span> </label> <label class="ml-auto"> <input type="radio" name="{input.name}-parameter-type" value="fixed" disabled={readonly || parameter?.python_type.type === "filepath"} bind:group={editableTool.inputs[inputIdx].paramType} /> <span class="text-sm text-gray-500">Fixed</span> </label> </div> </div> </div> <!-- for required we need a description, for optional we need a default value and for fixed we need a value --> {#if input.paramType === "required" || input.paramType === "optional"} <label class="flex flex-row gap-2"> <div class="mb-1 font-semibold"> Description <p class="text-xs font-normal text-gray-500"> Will be passed in the model prompt, make it as clear and concise as possible </p> </div> <textarea name="{input.name}-description" class="w-full rounded-lg border-2 border-gray-200 bg-gray-100 p-2" placeholder="This is the description of the input." bind:value={input.description} disabled={readonly} ></textarea> </label> {/if} {#if input.paramType === "optional" || input.paramType === "fixed"} {@const isOptional = input.paramType === "optional"} <div class="flex flex-row gap-2"> <div class="mb-1 flex-grow font-semibold"> {isOptional ? "Default value" : "Value"} <p class="text-xs font-normal text-gray-500"> {#if isOptional} The tool will use this value by default but the model can specify a different one. {:else} The tool will use this value and it cannot be changed. {/if} </p> </div> {#if input.paramType === "optional"} <ToolInputComponent type={parameter?.python_type.type ?? "str"} disabled={readonly} bind:value={input.default} /> {:else} <ToolInputComponent type={parameter?.python_type.type ?? "str"} disabled={readonly} bind:value={input.value} /> {/if} </div> {/if} {#if input.type === "file"} <label class="flex flex-row gap-2"> <div class="mb-1 font-semibold"> MIME types <p class="text-xs font-normal text-gray-500"> This input is a file. Specify the MIME types that are allowed to be passed to the tool. 
</p> </div> <select name="{input.name}-mimeTypes" class="h-fit w-full rounded-lg border-2 border-gray-200 bg-gray-100 p-2" bind:value={input.mimeTypes} disabled={readonly} > <option value="image/*">image/*</option> <option value="audio/*">audio/*</option> <option value="video/*">video/*</option> <option value="application/pdf">application/pdf</option> <option value="text/csv">text/csv</option> <option value="*/*">*/*</option> </select></label > {/if} <!-- divider --> <div class="flex w-full flex-row flex-nowrap gap-2 border-b border-gray-200 pt-2" ></div> {/each} <div class="flex flex-col gap-4"> <div> <h3 class="text-lg font-semibold">Output options</h3> <p class="mb-2 text-sm text-gray-500"> Choose what value your tool will return and how </p> </div> <label class="flex flex-col gap-2" for="showOutput"> <div class="mb-1 font-semibold"> Output component <p class="text-xs font-normal text-gray-500"> Pick the gradio output component whose output will be used in the tool. </p> </div> {#if editableTool.outputComponent} {#if api.named_endpoints[editableTool.endpoint].returns.length > 1} <div class="flex flex-row gap-4"> {#each api.named_endpoints[editableTool.endpoint].returns as { component }, idx} <label class="text-gray-800"> <input type="radio" disabled={readonly || !ToolOutputComponents.safeParse(component).success} bind:group={editableTool.outputComponent} value={idx + ";" + component.toLowerCase()} name="outputComponent" /> <span class="font-mono" class:text-gray-400={!ToolOutputComponents.safeParse(component) .success} class:font-semibold={editableTool?.outputComponent?.split( ";" )[1] === component}>{component.toLowerCase()}-{idx}</span > </label> {/each} </div> {:else} <div> <input disabled checked type="radio" /> <span class="font-mono text-gray-800" >{editableTool.outputComponent.split(";")[1]}</span > </div> {/if} {/if} <p class="text-xs text-red-500"> {getError("outputComponent", form)} </p> </label> <label class="flex flex-row gap-2" for="showOutput"> <div class="mb-1 font-semibold"> Show output to user directly <p class="text-xs font-normal text-gray-500"> Some tools return long context that should not be shown to the user directly. </p> </div> <input type="checkbox" name="showOutput" bind:checked={editableTool.showOutput} class="peer rounded-lg border-2 border-gray-200 bg-gray-100 p-1" /> <p class="text-xs text-red-500"> {getError("showOutput", form)} </p> </label> </div> </div> </div> {:else if APIloading} <p class="text-sm text-gray-500">Loading API...</p> {:else if !api["named_endpoints"]} <p class="font-medium text-red-800"> No endpoints found in this space. Try another one. </p> {/if} {:catch error} <p class="text-sm text-gray-500">{error}</p> {/await} {/if} </div> <div class="relative bottom-0 mb-4 mt-auto flex w-full flex-row justify-end gap-2"> <button type="button" class="mt-4 w-fit rounded-full bg-gray-200 px-4 py-2 font-semibold text-gray-700" onclick={() => dispatch("close")} > Cancel </button> {#if !readonly} <button type="submit" disabled={formLoading || !formSubmittable} class="mt-4 w-fit rounded-full bg-black px-4 py-2 font-semibold" class:text-white={!formLoading && formSubmittable} class:text-gray-300={formLoading || !formSubmittable} class:bg-gray-400={formLoading || !formSubmittable} > {formLoading ? "Saving..." : "Save"} </button> {/if} </div> </div> </div> </form>
chat-ui/src/routes/tools/ToolEdit.svelte/0
{ "file_path": "chat-ui/src/routes/tools/ToolEdit.svelte", "repo_id": "chat-ui", "token_count": 10200 }
import timeit

import numpy as np

import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD


def get_duration(func):
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__

    return wrapper


def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data
        dummy_data.append((i, example))
    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)
        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))

    return dataset
datasets/benchmarks/utils.py/0
{ "file_path": "datasets/benchmarks/utils.py", "repo_id": "datasets", "token_count": 927 }