# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Zamba model.""" import math import tempfile import unittest import pytest from parameterized import parameterized from transformers import AutoTokenizer, Zamba2Config, is_torch_available from transformers.testing_utils import ( require_bitsandbytes, require_flash_attn, require_torch, require_torch_gpu, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( Zamba2ForCausalLM, Zamba2ForSequenceClassification, Zamba2Model, ) from transformers.models.zamba2.modeling_zamba2 import ( Zamba2HybridDynamicCache, ) class Zamba2ModelTester: def __init__( self, parent, batch_size=14, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=16, mamba_d_state=2, chunk_size=8, mamba_dt_rank="auto", num_hidden_layers=2, num_attention_heads=2, n_mamba_heads=8, mamba_ngroups=8, intermediate_size=4, hidden_act="gelu", hidden_mamba_act="silu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, layers_block_type=["mamba", "hybrid"], num_mem_blocks=1, use_mem_rope=True, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.mamba_dt_rank = mamba_dt_rank self.mamba_d_state = mamba_d_state self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.n_mamba_heads = n_mamba_heads self.mamba_ngroups = mamba_ngroups self.chunk_size = chunk_size self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_mamba_act = hidden_mamba_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope self.layers_block_type = layers_block_type self.num_mem_blocks = num_mem_blocks self.use_mem_rope = use_mem_rope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = 
ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): return Zamba2Config( vocab_size=self.vocab_size, hidden_size=self.hidden_size, mamba_dt_rank=self.mamba_dt_rank, mamba_d_state=self.mamba_d_state, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, n_mamba_heads=self.n_mamba_heads, intermediate_size=self.intermediate_size, chunk_size=self.chunk_size, hidden_act=self.hidden_act, mamba_ngroups=self.mamba_ngroups, hidden_mamba_act=self.hidden_mamba_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=True, initializer_range=self.initializer_range, use_mamba_kernels=False, layers_block_type=self.layers_block_type, num_mem_blocks=self.num_mem_blocks, use_mem_rope=self.use_mem_rope, ) def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = self.prepare_config_and_inputs() config.is_decoder = True return ( config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, ) def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels): model = Zamba2Model(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_causal_lm( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, ): model = Zamba2ForCausalLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=token_labels) result = model(input_ids, attention_mask=input_mask) result = model(input_ids, labels=token_labels) result = model(input_ids) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, ): config.is_decoder = True config.add_cross_attention = False model = Zamba2ForCausalLM(config=config) model.to(torch_device) model.eval() # first forward pass # Attention: Zamba2 needs the cache to be initialized to return a cache! 
past_key_values = Zamba2HybridDynamicCache(config, input_ids.shape[0], model.dtype, device=model.device) outputs = model( input_ids, attention_mask=input_mask, past_key_values=past_key_values, use_cache=True, ) past_key_values = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) next_mask = ids_tensor((self.batch_size, 1), vocab_size=2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model( next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True, )["hidden_states"][0] output_from_past = model( next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values, output_hidden_states=True, cache_position=torch.arange( input_ids.shape[1], input_ids.shape[1] + next_tokens.shape[1], device=model.device ), )["hidden_states"][0] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -1:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_for_sequence_classification( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = Zamba2ForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class Zamba2ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): test_torchscript = False all_model_classes = ( ( Zamba2Model, Zamba2ForCausalLM, Zamba2ForSequenceClassification, ) if is_torch_available() else () ) all_generative_model_classes = (Zamba2ForCausalLM,) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": Zamba2Model, "text-classification": Zamba2ForSequenceClassification, "text-generation": Zamba2ForCausalLM, "zero-shot": Zamba2ForSequenceClassification, } if is_torch_available() else {} ) test_headmasking = False test_pruning = False def setUp(self): self.model_tester = Zamba2ModelTester(self) self.config_tester = ConfigTester(self, config_class=Zamba2Config, hidden_size=37) @unittest.skip("position_ids cannot be used to pad due to Mamba2 layers") def test_flash_attention_2_padding_matches_padding_free_with_position_ids(self): pass @unittest.skip("Zamba2 has a hybrid cache") def test_past_key_values_format(self): r""" Zamba2's cache shape depends on whether a given layer is mamba or attention. For mamba layers, the KV cache has shape is empty and has shape [batch_size, 0]. The shape checks of this test assume instead that every layer has an attention cache, so we skip it. 
""" pass @unittest.skip(reason="Zamba2 has hybrid cache.") def test_generate_continue_from_inputs_embeds(self): pass @unittest.skip(reason="A large mamba2 would be necessary (and costly) for that") def test_multi_gpu_data_parallel_forward(self): pass def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_causal_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_initialization(self): r""" Overriding the test_initialization test as the A_log and D params of the Mamba block are initialized differently """ config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: if "A_log" in name: A = torch.arange(1, config.n_mamba_heads + 1, dtype=torch.float32)[None, :] self.assertTrue(torch.allclose(param.data, torch.log(A), atol=1e-5, rtol=1e-5)) elif "D" in name: # check if it's a ones like self.assertTrue(torch.allclose(param.data, torch.ones_like(param.data), atol=1e-5, rtol=1e-5)) elif "dt_bias" in name: dt = torch.exp( torch.tensor([0, 1]) * (math.log(config.time_step_max) - math.log(config.time_step_min)) + math.log(config.time_step_min) ).clamp(min=config.time_step_floor) inv_dt = dt + torch.log(-torch.expm1(-dt)) if param.requires_grad: self.assertTrue(param.data.max().item() <= inv_dt[1]) self.assertTrue(param.data.min().item() >= inv_dt[0]) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) @unittest.skip(reason="Cumbersome and redundant for Zamba2") def test_mismatched_shapes_have_properly_initialized_weights(self): r""" Overriding the test_mismatched_shapes_have_properly_initialized_weights test because A_log and D params of the Mamba block are initialized differently and we tested that in test_initialization """ pass def test_attention_outputs(self): r""" Overriding the test_attention_outputs test as the Zamba2 model outputs attention only for its attention layers """ config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_len = getattr(self.model_tester, "seq_length", None) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions # check that output_attentions also work using config del 
inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.attentions self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) def _get_input_ids_and_config(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() ( config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs return config, input_ids, input_mask def test_left_padding_compatibility(self): r""" Overriding the test_left_padding_compatibility test as the mamba layers accentuate the numerical differences effect of the left padding discussed in the issue in the note. Using a more permissive tolerance value. """ import inspect # NOTE: left-padding results in small numerical differences. This is expected. # See https://github.com/huggingface/transformers/issues/25420#issuecomment-1775317535 # First, filter out models that don't support left padding - generative and decoder-only. # Zamba2 is a decoder-only architecture decoder_only_classes = self.all_generative_model_classes # Then, test left-padding def _prepare_model_kwargs(input_ids, attention_mask, signature): model_kwargs = {"input_ids": input_ids, "attention_mask": attention_mask} if "position_ids" in signature: position_ids = torch.cumsum(attention_mask, dim=-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) model_kwargs["position_ids"] = position_ids if "cache_position" in signature: cache_position = torch.arange(input_ids.shape[-1], device=torch_device) model_kwargs["cache_position"] = cache_position return model_kwargs for model_class in decoder_only_classes: config, input_ids, attention_mask = self._get_input_ids_and_config() model = model_class(config).to(torch_device).eval() signature = inspect.signature(model.forward).parameters.keys() # Without padding model_kwargs = _prepare_model_kwargs(input_ids, attention_mask, signature) next_logits_wo_padding = model(**model_kwargs).logits[:, -1, :] # With left-padding (length 32) pad_size = (input_ids.shape[0], 32) padding = torch.ones(pad_size, dtype=input_ids.dtype, device=torch_device) * config.pad_token_id padded_input_ids = torch.cat((padding, input_ids), dim=1) padded_attention_mask = torch.cat((torch.zeros_like(padding), attention_mask), dim=1) model_kwargs = _prepare_model_kwargs(padded_input_ids, padded_attention_mask, signature) next_logits_with_padding = model(**model_kwargs).logits[:, -1, :] # They should result in very similar logits self.assertTrue(torch.allclose(next_logits_wo_padding, next_logits_with_padding, atol=3e-3)) @require_flash_attn @require_torch_gpu @require_bitsandbytes @pytest.mark.flash_attn_test @slow def test_flash_attn_2_fp32_ln(self): r""" Overriding the test_flash_attn_2_fp32_ln test as the Zamba2 model, like 
Mixtral, doesn't support right padding + use cache with FA2 """ for model_class in self.all_generative_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) dummy_input = inputs_dict[model.main_input_name] dummy_attention_mask = inputs_dict.get("attention_mask", torch.ones_like(dummy_input)) # NOTE: Zamba2 does not support right padding + use_cache with FA2. dummy_attention_mask[:, -1] = 1 model = model_class.from_pretrained( tmpdirname, torch_dtype=torch.float16, attn_implementation="flash_attention_2", low_cpu_mem_usage=True, load_in_4bit=True, ) for _, param in model.named_parameters(): # upcast only layer norms if (param.dtype == torch.float16) or (param.dtype == torch.bfloat16): param.data = param.data.to(torch.float32) _ = model(dummy_input) # with attention mask _ = model(dummy_input, attention_mask=dummy_attention_mask) @require_flash_attn @require_torch_gpu @pytest.mark.flash_attn_test @slow def test_flash_attn_2_inference_equivalence_right_padding(self): r""" Overriding the test_flash_attn_2_inference_padding_right test as the Zamba2 model, like Mixtral, doesn't support right padding + use cache with FA2 """ self.skipTest(reason="Zamba2 flash attention does not support right padding") @unittest.skip(reason="Zamba2 has its own special cache type") @parameterized.expand([(1, False), (1, True), (4, False)]) def test_new_cache_format(self, num_beams, do_sample): pass @require_torch class Zamba2ModelIntegrationTest(unittest.TestCase): model = None tokenizer = None @classmethod @slow def setUpClass(cls): model_id = "Zyphra/Zamba2-1.2B" cls.model = Zamba2ForCausalLM.from_pretrained( model_id, torch_dtype=torch.float32, low_cpu_mem_usage=True, revision="PR" ) cls.tokenizer = AutoTokenizer.from_pretrained(model_id, revision="PR") @parameterized.expand([(torch_device,), ("cpu",)]) @slow def test_simple_generate(self, torch_device): self.model.to(torch_device) input_ids = self.tokenizer("Hey how are you doing on this lovely evening?", return_tensors="pt")[ "input_ids" ].to(torch_device) out = self.model.generate(input_ids, do_sample=False, max_new_tokens=10) output_sentence = self.tokenizer.decode(out[0, :]) self.assertEqual( output_sentence, "<s> Hey how are you doing on this lovely evening?\n\nI'm doing well, thanks for", ) with torch.no_grad(): logits = self.model(input_ids=input_ids).logits.to(dtype=torch.float32) EXPECTED_LOGITS_NO_GRAD = torch.tensor( [ -5.9587, 10.5152, 7.0382, -2.8728, -4.8143, -4.8142, -4.8142, -4.8144, -4.8143, -4.8143, -4.8142, -4.8142, 6.0185, 18.0037, -4.8142, -4.8144, -4.8143, -4.8142, -4.8143, -4.8143, -4.8143, -4.8143, -4.8142, -4.8143, -4.8144, -4.8143, -4.8143, -4.8141, -4.8142, -4.8142, -4.8142, -4.8144, -4.8143, -4.8143, -4.8143, -4.8142, -4.8144, -4.8144, -4.8142, -4.8142 ] , dtype=torch.float32) # fmt: skip torch.testing.assert_close(logits[0, -1, :40].cpu(), EXPECTED_LOGITS_NO_GRAD, rtol=1e-3, atol=1e-3) @parameterized.expand([(torch_device,), ("cpu",)]) @slow def test_simple_batched_generate_with_padding(self, torch_device): self.model.to(torch_device) inputs = self.tokenizer( ["Hey how are you doing on this lovely evening?", "When did the Roman empire "], padding=True, return_tensors="pt", ).to(torch_device) out = self.model.generate(**inputs, do_sample=False, max_new_tokens=10) output_sentences = self.tokenizer.batch_decode(out) self.assertEqual( output_sentences[0], "<s> Hey how are you doing on 
this lovely evening?\n\nI'm doing well, thanks for", ) self.assertEqual( output_sentences[1], "[PAD][PAD][PAD][PAD]<s> When did the Roman empire 1st fall?\nThe Roman Empire fell in", ) with torch.no_grad(): logits = self.model(input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"]).logits.to( dtype=torch.float32 ) EXPECTED_LOGITS_NO_GRAD_0 = torch.tensor( [ -5.9611, 10.5208, 7.0411, -2.8743, -4.8167, -4.8167, -4.8167, -4.8168, -4.8167, -4.8167, -4.8167, -4.8166, 6.0218, 18.0062, -4.8167, -4.8168, -4.8167, -4.8167, -4.8167, -4.8168, -4.8168, -4.8168, -4.8167, -4.8167, -4.8168, -4.8167, -4.8167, -4.8165, -4.8167, -4.8167, -4.8167, -4.8169, -4.8168, -4.8168, -4.8168, -4.8166, -4.8169, -4.8168, -4.8167, -4.8167 ] , dtype=torch.float32) # fmt: skip EXPECTED_LOGITS_NO_GRAD_1 = torch.tensor( [ 0.1966, 6.3449, 3.8350, -5.7291, -6.5106, -6.5104, -6.5103, -6.5104, -6.5103, -6.5104, -6.5106, -6.5105, 7.8700, 13.5434, -6.5104, -6.5096, -6.5106, -6.5102, -6.5106, -6.5106, -6.5105, -6.5106, -6.5104, -6.5106, -6.5105, -6.5106, -6.5106, -6.5113, -6.5102, -6.5105, -6.5108, -6.5105, -6.5104, -6.5106, -6.5106, -6.5104, -6.5106, -6.5107, -6.5103, -6.5105 ] , dtype=torch.float32) # fmt: skip torch.testing.assert_close(logits[0, -1, :40].cpu(), EXPECTED_LOGITS_NO_GRAD_0, rtol=1e-3, atol=1e-3) torch.testing.assert_close( logits[1, -1, :40].cpu(), EXPECTED_LOGITS_NO_GRAD_1, rtol=1e-3, atol=6e-3 if torch_device == "cpu" else 1e-3, )
File: transformers/tests/models/zamba2/test_modeling_zamba2.py (repo: transformers)
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from huggingface_hub import ImageClassificationOutputElement from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, PreTrainedTokenizerBase, is_torch_available, is_vision_available, ) from transformers.pipelines import ImageClassificationPipeline, pipeline from transformers.testing_utils import ( compare_pipeline_output_to_hub_spec, is_pipeline_test, nested_simplify, require_tf, require_torch, require_torch_or_tf, require_vision, slow, ) from .test_pipelines_common import ANY if is_torch_available(): import torch if is_vision_available(): from PIL import Image else: class Image: @staticmethod def open(*args, **kwargs): pass @is_pipeline_test @require_torch_or_tf @require_vision class ImageClassificationPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING tf_model_mapping = TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING def get_test_pipeline( self, model, tokenizer=None, image_processor=None, feature_extractor=None, processor=None, torch_dtype="float32", ): image_classifier = ImageClassificationPipeline( model=model, tokenizer=tokenizer, feature_extractor=feature_extractor, image_processor=image_processor, processor=processor, torch_dtype=torch_dtype, top_k=2, ) examples = [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"), "http://images.cocodataset.org/val2017/000000039769.jpg", ] return image_classifier, examples def run_pipeline_test(self, image_classifier, examples): outputs = image_classifier("./tests/fixtures/tests_samples/COCO/000000039769.png") self.assertEqual( outputs, [ {"score": ANY(float), "label": ANY(str)}, {"score": ANY(float), "label": ANY(str)}, ], ) import datasets # we use revision="refs/pr/1" until the PR is merged # https://hf.co/datasets/hf-internal-testing/fixtures_image_utils/discussions/1 dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", split="test", revision="refs/pr/1") # Accepts URL + PIL.Image + lists outputs = image_classifier( [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"), "http://images.cocodataset.org/val2017/000000039769.jpg", # RGBA dataset[0]["image"], # LA dataset[1]["image"], # L dataset[2]["image"], ] ) self.assertEqual( outputs, [ [ {"score": ANY(float), "label": ANY(str)}, {"score": ANY(float), "label": ANY(str)}, ], [ {"score": ANY(float), "label": ANY(str)}, {"score": ANY(float), "label": ANY(str)}, ], [ {"score": ANY(float), "label": ANY(str)}, {"score": ANY(float), "label": ANY(str)}, ], [ {"score": ANY(float), "label": ANY(str)}, {"score": ANY(float), "label": ANY(str)}, ], [ {"score": ANY(float), "label": ANY(str)}, {"score": ANY(float), "label": ANY(str)}, ], ], ) for single_output in outputs: for output_element in single_output: compare_pipeline_output_to_hub_spec(output_element, ImageClassificationOutputElement) @require_torch def test_small_model_pt(self): small_model = 
"hf-internal-testing/tiny-random-vit" image_classifier = pipeline("image-classification", model=small_model) outputs = image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg") self.assertEqual( nested_simplify(outputs, decimals=4), [{"label": "LABEL_1", "score": 0.574}, {"label": "LABEL_0", "score": 0.426}], ) outputs = image_classifier( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ], top_k=2, ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [{"label": "LABEL_1", "score": 0.574}, {"label": "LABEL_0", "score": 0.426}], [{"label": "LABEL_1", "score": 0.574}, {"label": "LABEL_0", "score": 0.426}], ], ) @require_tf def test_small_model_tf(self): small_model = "hf-internal-testing/tiny-random-vit" image_classifier = pipeline("image-classification", model=small_model, framework="tf") outputs = image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg") self.assertEqual( nested_simplify(outputs, decimals=4), [{"label": "LABEL_1", "score": 0.574}, {"label": "LABEL_0", "score": 0.426}], ) outputs = image_classifier( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ], top_k=2, ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [{"label": "LABEL_1", "score": 0.574}, {"label": "LABEL_0", "score": 0.426}], [{"label": "LABEL_1", "score": 0.574}, {"label": "LABEL_0", "score": 0.426}], ], ) def test_custom_tokenizer(self): tokenizer = PreTrainedTokenizerBase() # Assert that the pipeline can be initialized with a feature extractor that is not in any mapping image_classifier = pipeline( "image-classification", model="hf-internal-testing/tiny-random-vit", tokenizer=tokenizer ) self.assertIs(image_classifier.tokenizer, tokenizer) @require_torch def test_torch_float16_pipeline(self): image_classifier = pipeline( "image-classification", model="hf-internal-testing/tiny-random-vit", torch_dtype=torch.float16 ) outputs = image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg") self.assertEqual( nested_simplify(outputs, decimals=3), [{"label": "LABEL_1", "score": 0.574}, {"label": "LABEL_0", "score": 0.426}], ) @require_torch def test_torch_bfloat16_pipeline(self): image_classifier = pipeline( "image-classification", model="hf-internal-testing/tiny-random-vit", torch_dtype=torch.bfloat16 ) outputs = image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg") self.assertEqual( nested_simplify(outputs, decimals=3), [{"label": "LABEL_1", "score": 0.574}, {"label": "LABEL_0", "score": 0.426}], ) @slow @require_torch def test_perceiver(self): # Perceiver is not tested by `run_pipeline_test` properly. 
# That is because the type of feature_extractor and model preprocessor need to be kept # in sync, which is not the case in the current design image_classifier = pipeline("image-classification", model="deepmind/vision-perceiver-conv") outputs = image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg") self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.4385, "label": "tabby, tabby cat"}, {"score": 0.321, "label": "tiger cat"}, {"score": 0.0502, "label": "Egyptian cat"}, {"score": 0.0137, "label": "crib, cot"}, {"score": 0.007, "label": "radiator"}, ], ) image_classifier = pipeline("image-classification", model="deepmind/vision-perceiver-fourier") outputs = image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg") self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.5658, "label": "tabby, tabby cat"}, {"score": 0.1309, "label": "tiger cat"}, {"score": 0.0722, "label": "Egyptian cat"}, {"score": 0.0707, "label": "remote control, remote"}, {"score": 0.0082, "label": "computer keyboard, keypad"}, ], ) image_classifier = pipeline("image-classification", model="deepmind/vision-perceiver-learned") outputs = image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg") self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.3022, "label": "tabby, tabby cat"}, {"score": 0.2362, "label": "Egyptian cat"}, {"score": 0.1856, "label": "tiger cat"}, {"score": 0.0324, "label": "remote control, remote"}, {"score": 0.0096, "label": "quilt, comforter, comfort, puff"}, ], ) @slow @require_torch def test_multilabel_classification(self): small_model = "hf-internal-testing/tiny-random-vit" # Sigmoid is applied for multi-label classification image_classifier = pipeline("image-classification", model=small_model) image_classifier.model.config.problem_type = "multi_label_classification" outputs = image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg") self.assertEqual( nested_simplify(outputs, decimals=4), [{"label": "LABEL_1", "score": 0.5356}, {"label": "LABEL_0", "score": 0.4612}], ) outputs = image_classifier( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ] ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [{"label": "LABEL_1", "score": 0.5356}, {"label": "LABEL_0", "score": 0.4612}], [{"label": "LABEL_1", "score": 0.5356}, {"label": "LABEL_0", "score": 0.4612}], ], ) @slow @require_torch def test_function_to_apply(self): small_model = "hf-internal-testing/tiny-random-vit" # Sigmoid is applied for multi-label classification image_classifier = pipeline("image-classification", model=small_model) outputs = image_classifier( "http://images.cocodataset.org/val2017/000000039769.jpg", function_to_apply="sigmoid", ) self.assertEqual( nested_simplify(outputs, decimals=4), [{"label": "LABEL_1", "score": 0.5356}, {"label": "LABEL_0", "score": 0.4612}], )
File: transformers/tests/pipelines/test_pipelines_image_classification.py (repo: transformers)
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import pytest from transformers import ( MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, MBart50TokenizerFast, MBartConfig, MBartForConditionalGeneration, TranslationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, require_tf, require_torch, slow from .test_pipelines_common import ANY @is_pipeline_test class TranslationPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING def get_test_pipeline( self, model, tokenizer=None, image_processor=None, feature_extractor=None, processor=None, torch_dtype="float32", ): if isinstance(model.config, MBartConfig): src_lang, tgt_lang = list(tokenizer.lang_code_to_id.keys())[:2] translator = TranslationPipeline( model=model, tokenizer=tokenizer, feature_extractor=feature_extractor, image_processor=image_processor, processor=processor, torch_dtype=torch_dtype, src_lang=src_lang, tgt_lang=tgt_lang, ) else: translator = TranslationPipeline( model=model, tokenizer=tokenizer, feature_extractor=feature_extractor, image_processor=image_processor, processor=processor, torch_dtype=torch_dtype, ) return translator, ["Some string", "Some other text"] def run_pipeline_test(self, translator, _): outputs = translator("Some string") self.assertEqual(outputs, [{"translation_text": ANY(str)}]) outputs = translator(["Some string"]) self.assertEqual(outputs, [{"translation_text": ANY(str)}]) outputs = translator(["Some string", "other string"]) self.assertEqual(outputs, [{"translation_text": ANY(str)}, {"translation_text": ANY(str)}]) @require_torch def test_small_model_pt(self): translator = pipeline("translation_en_to_ro", model="patrickvonplaten/t5-tiny-random", framework="pt") outputs = translator("This is a test string", max_length=20) self.assertEqual( outputs, [ { "translation_text": ( "Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide" " Beide Beide" ) } ], ) @require_tf def test_small_model_tf(self): translator = pipeline("translation_en_to_ro", model="patrickvonplaten/t5-tiny-random", framework="tf") outputs = translator("This is a test string", max_length=20) self.assertEqual( outputs, [ { "translation_text": ( "Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide" " Beide Beide" ) } ], ) @require_torch def test_en_to_de_pt(self): translator = pipeline("translation_en_to_de", model="patrickvonplaten/t5-tiny-random", framework="pt") outputs = translator("This is a test string", max_length=20) self.assertEqual( outputs, [ { "translation_text": ( "monoton monoton monoton monoton monoton monoton monoton monoton monoton monoton urine urine" " urine urine urine urine urine urine urine" ) } ], ) @require_tf def test_en_to_de_tf(self): translator = pipeline("translation_en_to_de", model="patrickvonplaten/t5-tiny-random", 
framework="tf") outputs = translator("This is a test string", max_length=20) self.assertEqual( outputs, [ { "translation_text": ( "monoton monoton monoton monoton monoton monoton monoton monoton monoton monoton urine urine" " urine urine urine urine urine urine urine" ) } ], ) class TranslationNewFormatPipelineTests(unittest.TestCase): @require_torch @slow def test_default_translations(self): # We don't provide a default for this pair with self.assertRaises(ValueError): pipeline(task="translation_cn_to_ar") # but we do for this one translator = pipeline(task="translation_en_to_de") self.assertEqual(translator._preprocess_params["src_lang"], "en") self.assertEqual(translator._preprocess_params["tgt_lang"], "de") @require_torch @slow def test_multilingual_translation(self): model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-50-many-to-many-mmt") tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50-many-to-many-mmt") translator = pipeline(task="translation", model=model, tokenizer=tokenizer) # Missing src_lang, tgt_lang with self.assertRaises(ValueError): translator("This is a test") outputs = translator("This is a test", src_lang="en_XX", tgt_lang="ar_AR") self.assertEqual(outputs, [{"translation_text": "هذا إختبار"}]) outputs = translator("This is a test", src_lang="en_XX", tgt_lang="hi_IN") self.assertEqual(outputs, [{"translation_text": "यह एक परीक्षण है"}]) # src_lang, tgt_lang can be defined at pipeline call time translator = pipeline(task="translation", model=model, tokenizer=tokenizer, src_lang="en_XX", tgt_lang="ar_AR") outputs = translator("This is a test") self.assertEqual(outputs, [{"translation_text": "هذا إختبار"}]) @require_torch def test_translation_on_odd_language(self): model = "patrickvonplaten/t5-tiny-random" translator = pipeline(task="translation_cn_to_ar", model=model) self.assertEqual(translator._preprocess_params["src_lang"], "cn") self.assertEqual(translator._preprocess_params["tgt_lang"], "ar") @require_torch def test_translation_default_language_selection(self): model = "patrickvonplaten/t5-tiny-random" with pytest.warns(UserWarning, match=r".*translation_en_to_de.*"): translator = pipeline(task="translation", model=model) self.assertEqual(translator.task, "translation_en_to_de") self.assertEqual(translator._preprocess_params["src_lang"], "en") self.assertEqual(translator._preprocess_params["tgt_lang"], "de") @require_torch def test_translation_with_no_language_no_model_fails(self): with self.assertRaises(ValueError): pipeline(task="translation")
File: transformers/tests/pipelines/test_pipelines_translation.py (repo: transformers)
# coding=utf-8 # Copyright 2022 The HuggingFace Team Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a clone of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoConfig, AutoModel, AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.models.opt.modeling_opt import OPTAttention from transformers.testing_utils import ( apply_skip_if_not_implemented, is_accelerate_available, is_bitsandbytes_available, is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu_if_bnb_not_multi_backend_enabled, require_torch_multi_gpu, slow, torch_device, ) def get_some_linear_layer(model): if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc elif model.config.model_type == "llama": return model.model.layers[0].mlp.gate_proj return model.transformer.h[0].mlp.dense_4h_to_h if is_accelerate_available(): from accelerate import PartialState from accelerate.logging import get_logger logger = get_logger(__name__) _ = PartialState() if is_torch_available(): import torch import torch.nn as nn class LoRALayer(nn.Module): """Wraps a linear layer with LoRA-like adapter - Used for testing purposes only""" def __init__(self, module: nn.Module, rank: int, dtype: torch.dtype): super().__init__() self.module = module self.adapter = nn.Sequential( nn.Linear(module.in_features, rank, bias=False, dtype=dtype), nn.Linear(rank, module.out_features, bias=False, dtype=dtype), ) small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5 nn.init.normal_(self.adapter[0].weight, std=small_std) nn.init.zeros_(self.adapter[1].weight) self.adapter.to(module.weight.device) def forward(self, input, *args, **kwargs): return self.module(input, *args, **kwargs) + self.adapter(input) if is_bitsandbytes_available(): import bitsandbytes as bnb @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu_if_bnb_not_multi_backend_enabled @slow class BaseMixedInt8Test(unittest.TestCase): # We keep the constants inside the init function and model loading inside setUp function # We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected) # Therefore here we use only bloom-1b3 to test our module model_name = "bigscience/bloom-1b7" # Constant values EXPECTED_RELATIVE_DIFFERENCE = ( 1.540025 # This was obtained on a Quadro RTX 8000 so the number might slightly change ) input_text = "Hello my name is" EXPECTED_OUTPUTS = set() EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of the family.\n") # Expected values on a A10 EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n") MAX_NEW_TOKENS = 10 # Expected values with offload EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer based in") def setUp(self): # Models and tokenizer self.tokenizer = AutoTokenizer.from_pretrained(self.model_name) 
@apply_skip_if_not_implemented class MixedInt8Test(BaseMixedInt8Test): def setUp(self): super().setUp() # Models and tokenizer self.model_fp16 = AutoModelForCausalLM.from_pretrained( self.model_name, torch_dtype=torch.float16, device_map="auto" ) self.model_8bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_8bit=True, device_map="auto") def tearDown(self): r""" TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to avoid unexpected behaviors. Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27 """ del self.model_fp16 del self.model_8bit gc.collect() torch.cuda.empty_cache() def test_get_keys_to_not_convert_trust_remote_code(self): r""" Test the `get_keys_to_not_convert` function with `trust_remote_code` models. """ from accelerate import init_empty_weights from transformers.integrations.bitsandbytes import get_keys_to_not_convert model_id = "mosaicml/mpt-7b" config = AutoConfig.from_pretrained( model_id, trust_remote_code=True, revision="ada218f9a93b5f1c6dce48a4cc9ff01fcba431e7" ) with init_empty_weights(): model = AutoModelForCausalLM.from_config( config, trust_remote_code=True, code_revision="ada218f9a93b5f1c6dce48a4cc9ff01fcba431e7" ) self.assertEqual(get_keys_to_not_convert(model), ["transformer.wte"]) def test_get_keys_to_not_convert(self): r""" Test the `get_keys_to_not_convert` function. """ from accelerate import init_empty_weights from transformers import AutoModelForMaskedLM, Blip2ForConditionalGeneration, MptForCausalLM, OPTForCausalLM from transformers.integrations.bitsandbytes import get_keys_to_not_convert model_id = "mosaicml/mpt-7b" config = AutoConfig.from_pretrained(model_id, revision="72e5f594ce36f9cabfa2a9fd8f58b491eb467ee7") with init_empty_weights(): model = MptForCausalLM(config) # The order of the keys does not matter, so we sort them before comparing, same for the other tests. 
self.assertEqual(get_keys_to_not_convert(model).sort(), ["lm_head", "transformer.wte"].sort()) model_id = "Salesforce/blip2-opt-2.7b" config = AutoConfig.from_pretrained(model_id, revision="1ef7f63a8f0a144c13fdca8103eb7b4691c74cec") with init_empty_weights(): model = Blip2ForConditionalGeneration(config) self.assertEqual( get_keys_to_not_convert(model).sort(), ["language_model.lm_head", "language_model.model.decoder.embed_tokens"].sort(), ) model_id = "facebook/opt-350m" config = AutoConfig.from_pretrained(model_id, revision="cb32f77e905cccbca1d970436fb0f5e6b58ee3c5") with init_empty_weights(): model = OPTForCausalLM(config) self.assertEqual(get_keys_to_not_convert(model).sort(), ["lm_head", "model.decoder.embed_tokens"].sort()) model_id = "FacebookAI/roberta-large" config = AutoConfig.from_pretrained(model_id, revision="716877d372b884cad6d419d828bac6c85b3b18d9") with init_empty_weights(): model = AutoModelForMaskedLM.from_config(config) self.assertEqual( get_keys_to_not_convert(model).sort(), ["'roberta.embeddings.word_embeddings', 'lm_head', 'lm_head.decoder"].sort(), ) def test_quantization_config_json_serialization(self): r""" A simple test to check if the quantization config is correctly serialized and deserialized """ config = self.model_8bit.config self.assertTrue(hasattr(config, "quantization_config")) _ = config.to_dict() _ = config.to_diff_dict() _ = config.to_json_string() def test_original_dtype(self): r""" A simple test to check if the model succesfully stores the original dtype """ self.assertTrue(hasattr(self.model_8bit.config, "_pre_quantization_dtype")) self.assertFalse(hasattr(self.model_fp16.config, "_pre_quantization_dtype")) self.assertTrue(self.model_8bit.config._pre_quantization_dtype == torch.float16) def test_memory_footprint(self): r""" A simple test to check if the model conversion has been done correctly by checking on the memory footprint of the converted model and the class type of the linear layers of the converted models """ from bitsandbytes.nn import Int8Params mem_fp16 = self.model_fp16.get_memory_footprint() mem_8bit = self.model_8bit.get_memory_footprint() self.assertAlmostEqual(mem_fp16 / mem_8bit, self.EXPECTED_RELATIVE_DIFFERENCE, delta=1e-5) self.assertTrue(get_some_linear_layer(self.model_8bit).weight.__class__ == Int8Params) def test_linear_are_8bit(self): r""" A simple test to check if the model conversion has been done correctly by checking on the memory footprint of the converted model and the class type of the linear layers of the converted models """ from transformers import T5PreTrainedModel self.model_fp16.get_memory_footprint() self.model_8bit.get_memory_footprint() for name, module in self.model_8bit.named_modules(): if isinstance(module, torch.nn.Linear): if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules: self.assertTrue(module.weight.dtype == torch.int8) def test_llm_skip(self): r""" A simple test to check if `llm_int8_skip_modules` works as expected """ quantization_config = BitsAndBytesConfig(load_in_8bit=True, llm_int8_skip_modules=["classifier"]) seq_classification_model = AutoModelForSequenceClassification.from_pretrained( "FacebookAI/roberta-large-mnli", quantization_config=quantization_config ) self.assertTrue(seq_classification_model.roberta.encoder.layer[0].output.dense.weight.dtype == torch.int8) self.assertTrue( isinstance(seq_classification_model.roberta.encoder.layer[0].output.dense, bnb.nn.Linear8bitLt) ) self.assertTrue(isinstance(seq_classification_model.classifier.dense, nn.Linear)) 
self.assertTrue(seq_classification_model.classifier.dense.weight.dtype != torch.int8) self.assertTrue(isinstance(seq_classification_model.classifier.out_proj, nn.Linear)) self.assertTrue(seq_classification_model.classifier.out_proj != torch.int8) def test_generate_quality(self): r""" Test the generation quality of the quantized model and see that we are matching the expected output. Given that we are operating on small numbers + the testing model is relatively small, we might not get the same output across GPUs. So we'll generate few tokens (5-10) and check their output. """ encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = self.model_8bit.generate( input_ids=encoded_input["input_ids"].to(torch_device), max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) def test_generate_quality_config(self): r""" Test that loading the model with the config is equivalent """ bnb_config = BitsAndBytesConfig() bnb_config.load_in_8bit = True model_8bit_from_config = AutoModelForCausalLM.from_pretrained( self.model_name, quantization_config=bnb_config, device_map="auto" ) encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = model_8bit_from_config.generate( input_ids=encoded_input["input_ids"].to(torch_device), max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) def test_generate_quality_dequantize(self): r""" Test that loading the model and dequantizing it produce correct results """ bnb_config = BitsAndBytesConfig(load_in_8bit=True) model_8bit = AutoModelForCausalLM.from_pretrained( self.model_name, quantization_config=bnb_config, device_map="auto" ) model_8bit.dequantize() encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = model_8bit.generate( input_ids=encoded_input["input_ids"].to(torch_device), max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) def test_raise_if_config_and_load_in_8bit(self): r""" Test that loading the model with the config and `load_in_8bit` raises an error """ bnb_config = BitsAndBytesConfig() with self.assertRaises(ValueError): _ = AutoModelForCausalLM.from_pretrained( self.model_name, quantization_config=bnb_config, load_in_8bit=True, device_map="auto", llm_int8_enable_fp32_cpu_offload=True, ) def test_device_and_dtype_assignment(self): r""" Test whether attempting to change the device or cast the dtype of a model after converting it to 8-bit precision will raise an appropriate error. The test ensures that such operations are prohibited on 8-bit models to prevent invalid conversions. 
""" with self.assertRaises(ValueError): # Tries with `str` self.model_8bit.to("cpu") with self.assertRaises(ValueError): # Tries with a `dtype`` self.model_8bit.to(torch.float16) with self.assertRaises(ValueError): # Tries with a `device` self.model_8bit.to(torch.device(torch_device)) with self.assertRaises(ValueError): # Tries to cast the 8-bit model to float32 using `float()` self.model_8bit.float() with self.assertRaises(ValueError): # Tries to cast the 4-bit model to float16 using `half()` self.model_8bit.half() # Test if we did not break anything encoded_input = self.tokenizer(self.input_text, return_tensors="pt") self.model_fp16 = self.model_fp16.to(torch.float32) _ = self.model_fp16.generate(input_ids=encoded_input["input_ids"].to(torch_device), max_new_tokens=10) # Check this does not throw an error _ = self.model_fp16.to("cpu") # Check this does not throw an error _ = self.model_fp16.half() # Check this does not throw an error _ = self.model_fp16.float() def test_fp32_int8_conversion(self): r""" Test whether it is possible to mix both `int8` and `fp32` weights when using `keep_in_fp32_modules` correctly. """ model = AutoModelForSeq2SeqLM.from_pretrained("google-t5/t5-small", load_in_8bit=True, device_map="auto") self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32) def test_int8_serialization(self): r""" Test whether it is possible to serialize a model in 8-bit. """ from bitsandbytes.nn import Int8Params with tempfile.TemporaryDirectory() as tmpdirname: self.model_8bit.save_pretrained(tmpdirname) # check that the file `quantization_config` is present config = AutoConfig.from_pretrained(tmpdirname) self.assertTrue(hasattr(config, "quantization_config")) model_from_saved = AutoModelForCausalLM.from_pretrained(tmpdirname, load_in_8bit=True, device_map="auto") linear = get_some_linear_layer(model_from_saved) self.assertTrue(linear.weight.__class__ == Int8Params) self.assertTrue(hasattr(linear.weight, "SCB")) # generate encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = model_from_saved.generate( input_ids=encoded_input["input_ids"].to(torch_device), max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) def test_int8_serialization_regression(self): r""" Test whether it is possible to serialize a model in 8-bit - using not safetensors """ from bitsandbytes.nn import Int8Params with tempfile.TemporaryDirectory() as tmpdirname: self.model_8bit.save_pretrained(tmpdirname, safe_serialization=False) # check that the file `quantization_config` is present config = AutoConfig.from_pretrained(tmpdirname) self.assertTrue(hasattr(config, "quantization_config")) model_from_saved = AutoModelForCausalLM.from_pretrained(tmpdirname, load_in_8bit=True, device_map="auto") linear = get_some_linear_layer(model_from_saved) self.assertTrue(linear.weight.__class__ == Int8Params) self.assertTrue(hasattr(linear.weight, "SCB")) # generate encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = model_from_saved.generate( input_ids=encoded_input["input_ids"].to(torch_device), max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) def test_int8_serialization_sharded(self): r""" Test whether it is possible to serialize a model in 8-bit - sharded version. 
""" from bitsandbytes.nn import Int8Params with tempfile.TemporaryDirectory() as tmpdirname: self.model_8bit.save_pretrained(tmpdirname, max_shard_size="200MB") # check that the file `quantization_config` is present config = AutoConfig.from_pretrained(tmpdirname) self.assertTrue(hasattr(config, "quantization_config")) model_from_saved = AutoModelForCausalLM.from_pretrained(tmpdirname) linear = get_some_linear_layer(model_from_saved) self.assertTrue(linear.weight.__class__ == Int8Params) self.assertTrue(hasattr(linear.weight, "SCB")) # generate encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = model_from_saved.generate( input_ids=encoded_input["input_ids"].to(torch_device), max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) def test_int8_from_pretrained(self): r""" Test whether loading a 8bit model from the Hub works as expected """ from bitsandbytes.nn import Int8Params model_id = "ybelkada/bloom-1b7-8bit" model = AutoModelForCausalLM.from_pretrained(model_id) linear = get_some_linear_layer(model) self.assertTrue(linear.weight.__class__ == Int8Params) self.assertTrue(hasattr(linear.weight, "SCB")) # generate encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = model.generate(input_ids=encoded_input["input_ids"].to(torch_device), max_new_tokens=10) self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu_if_bnb_not_multi_backend_enabled @slow class MixedInt8T5Test(unittest.TestCase): @classmethod def setUpClass(cls): cls.model_name = "google-t5/t5-small" cls.dense_act_model_name = "google/flan-t5-small" # flan-t5 uses dense-act instead of dense-relu-dense cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name) cls.input_text = "Translate in German: Hello, my dog is cute" def tearDown(self): r""" TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to avoid unexpected behaviors. Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27 """ gc.collect() torch.cuda.empty_cache() def test_inference_without_keep_in_fp32(self): r""" Test whether it is possible to mix both `int8` and `fp32` weights when using `keep_in_fp32_modules` correctly. `flan-t5-small` uses `T5DenseGatedActDense` whereas `google-t5/t5-small` uses `T5DenseReluDense`. We need to test both cases. """ from transformers import T5ForConditionalGeneration modules = T5ForConditionalGeneration._keep_in_fp32_modules T5ForConditionalGeneration._keep_in_fp32_modules = None # test with `google-t5/t5-small` model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_8bit=True, device_map="auto") encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(model.device) _ = model.generate(**encoded_input) # test with `flan-t5-small` model = T5ForConditionalGeneration.from_pretrained( self.dense_act_model_name, load_in_8bit=True, device_map="auto" ) encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(model.device) _ = model.generate(**encoded_input) T5ForConditionalGeneration._keep_in_fp32_modules = modules def test_inference_with_keep_in_fp32(self): r""" Test whether it is possible to mix both `int8` and `fp32` weights when using `keep_in_fp32_modules` correctly. 
`flan-t5-small` uses `T5DenseGatedActDense` whereas `google-t5/t5-small` uses `T5DenseReluDense`. We need to test both cases. """ from transformers import T5ForConditionalGeneration # test with `google-t5/t5-small` model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_8bit=True, device_map="auto") # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear8bitLt)) encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(model.device) _ = model.generate(**encoded_input) # test with `flan-t5-small` model = T5ForConditionalGeneration.from_pretrained( self.dense_act_model_name, load_in_8bit=True, device_map="auto" ) encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(model.device) _ = model.generate(**encoded_input) def test_inference_with_keep_in_fp32_serialized(self): r""" Test whether it is possible to mix both `int8` and `fp32` weights when using `keep_in_fp32_modules` correctly on a serialized model. `flan-t5-small` uses `T5DenseGatedActDense` whereas `google-t5/t5-small` uses `T5DenseReluDense`. We need to test both cases. """ from transformers import T5ForConditionalGeneration # test with `google-t5/t5-small` model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_8bit=True, device_map="auto") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir) model = T5ForConditionalGeneration.from_pretrained(tmp_dir) # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear8bitLt)) encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(model.device) _ = model.generate(**encoded_input) # test with `flan-t5-small` model = T5ForConditionalGeneration.from_pretrained( self.dense_act_model_name, load_in_8bit=True, device_map="auto" ) encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(model.device) _ = model.generate(**encoded_input) class MixedInt8ModelClassesTest(BaseMixedInt8Test): def setUp(self): super().setUp() # model_name self.model_name = "bigscience/bloom-560m" self.seq_to_seq_name = "google-t5/t5-small" # Different types of model self.base_model = AutoModel.from_pretrained(self.model_name, load_in_8bit=True, device_map="auto") # Sequence classification model self.sequence_model = AutoModelForSequenceClassification.from_pretrained( self.model_name, load_in_8bit=True, device_map="auto" ) # CausalLM model self.model_8bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_8bit=True, device_map="auto") # Seq2seq model self.seq_to_seq_model = AutoModelForSeq2SeqLM.from_pretrained( self.seq_to_seq_name, load_in_8bit=True, device_map="auto" ) def tearDown(self): r""" TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to avoid unexpected behaviors. Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27 """ del self.base_model del self.sequence_model del self.model_8bit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def test_correct_head_class(self): r""" A simple test to check if the last modules for some classes (AutoModelForCausalLM or SequenceClassification) are kept in their native class. 
""" from bitsandbytes.nn import Int8Params # last param of a base model should be a linear8bit module self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Int8Params) # Other heads should be nn.Parameter self.assertTrue(self.model_8bit.lm_head.weight.__class__ == torch.nn.Parameter) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter) @apply_skip_if_not_implemented class MixedInt8TestPipeline(BaseMixedInt8Test): def setUp(self): super().setUp() def tearDown(self): r""" TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to avoid unexpected behaviors. Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27 """ if hasattr(self, "pipe"): del self.pipe gc.collect() torch.cuda.empty_cache() def test_pipeline(self): r""" The aim of this test is to verify that the mixed int8 is compatible with `pipeline` from transformers. Since we used pipline for inference speed benchmarking we want to make sure that this feature does not break anything on pipline. """ # self._clear_cuda_cache() self.pipe = pipeline( "text-generation", model=self.model_name, model_kwargs={"device_map": "auto", "load_in_8bit": True}, max_new_tokens=self.MAX_NEW_TOKENS, ) # Real second forward pass pipeline_output = self.pipe(self.input_text) self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS) @require_torch_multi_gpu @apply_skip_if_not_implemented class MixedInt8TestMultiGpu(BaseMixedInt8Test): def setUp(self): super().setUp() def test_multi_gpu_loading(self): r""" This tests that the model has been loaded and can be used correctly on a multi-GPU setup. Let's just try to load a model on 2 GPUs and see if it works. The model we test has ~2GB of total, 3GB should suffice """ model_parallel = AutoModelForCausalLM.from_pretrained( self.model_name, load_in_8bit=True, device_map="balanced" ) # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1}) # Check that inference pass works on the model encoded_input = self.tokenizer(self.input_text, return_tensors="pt") # Second real batch output_parallel = model_parallel.generate( input_ids=encoded_input["input_ids"].to(torch_device), max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) @require_torch_multi_gpu @apply_skip_if_not_implemented class MixedInt8TestCpuGpu(BaseMixedInt8Test): def setUp(self): super().setUp() def check_inference_correctness(self, model): # Check that inference pass works on the model encoded_input = self.tokenizer(self.input_text, return_tensors="pt") # Check the exactness of the results output_parallel = model.generate(input_ids=encoded_input["input_ids"].to(torch_device), max_new_tokens=10) # Get the generation output_text = self.tokenizer.decode(output_parallel[0], skip_special_tokens=True) self.assertIn(output_text, self.EXPECTED_OUTPUTS) def test_cpu_gpu_loading_random_device_map(self): r""" A test to check is dispatching a model on cpu & gpu works correctly using a random `device_map`. 
""" device_map = { "transformer.word_embeddings": 0, "transformer.word_embeddings_layernorm": 0, "lm_head": 0, "transformer.h.0": "cpu", "transformer.h.1": "cpu", "transformer.h.2": 0, "transformer.h.3": 0, "transformer.h.4": 0, "transformer.h.5": 0, "transformer.h.6": 0, "transformer.h.7": 0, "transformer.h.8": 0, "transformer.h.9": 1, "transformer.h.10": 0, "transformer.h.11": 1, "transformer.h.12": 0, "transformer.h.13": 0, "transformer.h.14": 1, "transformer.h.15": 0, "transformer.h.16": 0, "transformer.h.17": 1, "transformer.h.18": 1, "transformer.h.19": 0, "transformer.h.20": 1, "transformer.h.21": 1, "transformer.h.22": 0, "transformer.h.23": 0, "transformer.ln_f": 1, } bnb_config = BitsAndBytesConfig(llm_int8_enable_fp32_cpu_offload=True, load_in_8bit=True) model_8bit = AutoModelForCausalLM.from_pretrained( self.model_name, device_map=device_map, quantization_config=bnb_config, ) # Check that the model has been correctly set on device 0, 1, and `cpu`. self.assertEqual(set(model_8bit.hf_device_map.values()), {0, 1, "cpu"}) self.check_inference_correctness(model_8bit) def test_cpu_gpu_loading_custom_device_map(self): r""" A test to check is dispatching a model on cpu & gpu works correctly using a custom `device_map`. This time the device map is more organized than the test above and uses the abstraction `transformer.h` to encapsulate all the decoder layers. """ device_map = { "transformer.word_embeddings": "cpu", "transformer.word_embeddings_layernorm": "cpu", "lm_head": "cpu", "transformer.h": 0, "transformer.ln_f": 1, } bnb_config = BitsAndBytesConfig(llm_int8_enable_fp32_cpu_offload=True, load_in_8bit=True) # Load model model_8bit = AutoModelForCausalLM.from_pretrained( self.model_name, device_map=device_map, quantization_config=bnb_config, ) # Check that the model has been correctly set on device 0, 1, and `cpu`. self.assertEqual(set(model_8bit.hf_device_map.values()), {0, 1, "cpu"}) self.check_inference_correctness(model_8bit) def test_cpu_gpu_disk_loading_custom_device_map(self): r""" A test to check is dispatching a model on cpu & gpu works correctly using a custom `device_map`. This time we also add `disk` on the device_map. """ device_map = { "transformer.word_embeddings": 0, "transformer.word_embeddings_layernorm": "cpu", "lm_head": 0, "transformer.h": 1, "transformer.ln_f": "disk", } bnb_config = BitsAndBytesConfig(llm_int8_enable_fp32_cpu_offload=True, load_in_8bit=True) with tempfile.TemporaryDirectory() as tmpdirname: # Load model model_8bit = AutoModelForCausalLM.from_pretrained( self.model_name, device_map=device_map, quantization_config=bnb_config, offload_folder=tmpdirname, ) # Check that the model has been correctly set on device 0, 1, and `cpu`. self.assertEqual(set(model_8bit.hf_device_map.values()), {0, 1, "cpu", "disk"}) self.check_inference_correctness(model_8bit) def test_cpu_gpu_disk_loading_custom_device_map_kwargs(self): r""" A test to check is dispatching a model on cpu & gpu works correctly using a custom `device_map`. 
This time we also add `disk` on the device_map - using the kwargs directly instead of the quantization config """ device_map = { "transformer.word_embeddings": 0, "transformer.word_embeddings_layernorm": "cpu", "lm_head": 0, "transformer.h": 1, "transformer.ln_f": "disk", } with tempfile.TemporaryDirectory() as tmpdirname: # Load model model_8bit = AutoModelForCausalLM.from_pretrained( self.model_name, device_map=device_map, load_in_8bit=True, llm_int8_enable_fp32_cpu_offload=True, offload_folder=tmpdirname, ) # Check that the model has been correctly set on device 0, 1, and `cpu`. self.assertEqual(set(model_8bit.hf_device_map.values()), {0, 1, "cpu", "disk"}) self.check_inference_correctness(model_8bit) @apply_skip_if_not_implemented class MixedInt8TestTraining(BaseMixedInt8Test): def setUp(self): self.model_name = "facebook/opt-350m" super().setUp() def test_training(self): if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.0"): self.skipTest(reason="This test requires bitsandbytes>=0.37.0") # Step 1: freeze all parameters model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_8bit=True) if torch.cuda.is_available(): self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()}) elif torch.xpu.is_available(): self.assertEqual(set(model.hf_device_map.values()), {f"xpu:{torch.xpu.current_device()}"}) else: self.assertTrue(all(param.device.type == "cpu" for param in model.parameters())) for param in model.parameters(): param.requires_grad = False # freeze the model - train adapters later # cast all non INT8 parameters to fp32 if param.dtype in (torch.float16, torch.bfloat16) and param.__class__.__name__ != "Params4bit": param.data = param.data.to(torch.float32) # Step 2: add adapters for _, module in model.named_modules(): if isinstance(module, OPTAttention): module.q_proj = LoRALayer(module.q_proj, rank=16, dtype=model.dtype) module.k_proj = LoRALayer(module.k_proj, rank=16, dtype=model.dtype) module.v_proj = LoRALayer(module.v_proj, rank=16, dtype=model.dtype) # Step 3: dummy batch batch = self.tokenizer("Test batch ", return_tensors="pt").to(torch_device) # Step 4: Check if the gradient is not None if torch_device in {"xpu", "cpu"}: # XPU and CPU finetune do not support autocast for now. out = model.forward(**batch) out.logits.norm().backward() else: with torch.autocast(torch_device): out = model.forward(**batch) out.logits.norm().backward() for module in model.modules(): if isinstance(module, LoRALayer): self.assertTrue(module.adapter[1].weight.grad is not None) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0) elif isinstance(module, nn.Embedding): self.assertTrue(module.weight.grad is None) @apply_skip_if_not_implemented @unittest.skipIf(torch_device == "xpu", reason="XPU has precision issue on gpt model, will test it once fixed") class MixedInt8GPT2Test(MixedInt8Test): model_name = "openai-community/gpt2-xl" EXPECTED_RELATIVE_DIFFERENCE = 1.8720077507258357 EXPECTED_OUTPUTS = set() EXPECTED_OUTPUTS.add("Hello my name is John Doe, and I'm a big fan of") EXPECTED_OUTPUTS.add("Hello my name is John Doe, and I'm a fan of the") # Expected values on a A10 EXPECTED_OUTPUTS.add("Hello my name is John Doe, and I am a member of the") # Expected values on Intel CPU EXPECTED_OUTPUTS.add("Hello my name is John Doe. I am a man. I am") EXPECTED_OUTPUTS.add("Hello my name is John, and I'm a writer. 
I'm") def test_int8_from_pretrained(self): r""" Test whether loading a 8bit model from the Hub works as expected """ from bitsandbytes.nn import Int8Params model_id = "ybelkada/gpt2-xl-8bit" model = AutoModelForCausalLM.from_pretrained(model_id) linear = get_some_linear_layer(model) self.assertTrue(linear.weight.__class__ == Int8Params) self.assertTrue(hasattr(linear.weight, "SCB")) # generate encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = model.generate(input_ids=encoded_input["input_ids"].to(torch_device), max_new_tokens=10) self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) class MixedInt8LlamaTest(MixedInt8Test): model_name = "TinyLlama/TinyLlama-1.1B-Chat-v1.0" EXPECTED_RELATIVE_DIFFERENCE = 1.7869331026479096 EXPECTED_OUTPUTS = set() # Expected on Intel XPU EXPECTED_OUTPUTS.add("Hello my name is John Smith and I am a software engineer. I") # Expected on NVIDIA T4 EXPECTED_OUTPUTS.add("Hello my name is John and I am a software engineer. I have") def test_int8_from_pretrained(self): r""" Test whether loading a 8bit model from the Hub works as expected """ from bitsandbytes.nn import Int8Params model_id = "Jiqing/TinyLlama-1.1B-Chat-v1.0-bnb-8bit" model = AutoModelForCausalLM.from_pretrained(model_id) linear = get_some_linear_layer(model) self.assertTrue(linear.weight.__class__ == Int8Params) self.assertTrue(hasattr(linear.weight, "SCB")) # generate encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = model.generate(input_ids=encoded_input["input_ids"].to(torch_device), max_new_tokens=10) self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
transformers/tests/quantization/bnb/test_mixed_int8.py
{ "file_path": "transformers/tests/quantization/bnb/test_mixed_int8.py", "repo_id": "transformers", "token_count": 17432 }
import json import logging import os import subprocess from argparse import ArgumentParser logger = logging.getLogger(__name__) def parse_args(): parser = ArgumentParser() parsed, unknown = parser.parse_known_args() for arg in unknown: if arg.startswith(("-", "--")): parser.add_argument(arg.split("=")[0]) return parser.parse_args() def main(): args = parse_args() port = 8888 num_gpus = int(os.environ["SM_NUM_GPUS"]) hosts = json.loads(os.environ["SM_HOSTS"]) num_nodes = len(hosts) current_host = os.environ["SM_CURRENT_HOST"] rank = hosts.index(current_host) os.environ["NCCL_DEBUG"] = "INFO" if num_nodes > 1: cmd = f"""python -m torch.distributed.launch \ --nnodes={num_nodes} \ --node_rank={rank} \ --nproc_per_node={num_gpus} \ --master_addr={hosts[0]} \ --master_port={port} \ ./run_glue.py \ {"".join([f" --{parameter} {value}" for parameter,value in args.__dict__.items()])}""" else: cmd = f"""python -m torch.distributed.launch \ --nproc_per_node={num_gpus} \ ./run_glue.py \ {"".join([f" --{parameter} {value}" for parameter,value in args.__dict__.items()])}""" try: subprocess.run(cmd, shell=True) except Exception as e: logger.info(e) if __name__ == "__main__": main()
transformers/tests/sagemaker/scripts/pytorch/run_ddp.py
{ "file_path": "transformers/tests/sagemaker/scripts/pytorch/run_ddp.py", "repo_id": "transformers", "token_count": 694 }
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import inspect import json import os import random import re import unittest from dataclasses import fields, is_dataclass from pathlib import Path from textwrap import dedent from typing import get_args from huggingface_hub import ( AudioClassificationInput, AutomaticSpeechRecognitionInput, DepthEstimationInput, ImageClassificationInput, ImageSegmentationInput, ImageToTextInput, ObjectDetectionInput, QuestionAnsweringInput, VideoClassificationInput, ZeroShotImageClassificationInput, ) from transformers.models.auto.processing_auto import PROCESSOR_MAPPING_NAMES from transformers.pipelines import ( AudioClassificationPipeline, AutomaticSpeechRecognitionPipeline, DepthEstimationPipeline, ImageClassificationPipeline, ImageSegmentationPipeline, ImageToTextPipeline, ObjectDetectionPipeline, QuestionAnsweringPipeline, VideoClassificationPipeline, ZeroShotImageClassificationPipeline, ) from transformers.testing_utils import ( is_pipeline_test, require_av, require_pytesseract, require_timm, require_torch, require_torch_or_tf, require_vision, ) from transformers.utils import direct_transformers_import, logging from .pipelines.test_pipelines_audio_classification import AudioClassificationPipelineTests from .pipelines.test_pipelines_automatic_speech_recognition import AutomaticSpeechRecognitionPipelineTests from .pipelines.test_pipelines_depth_estimation import DepthEstimationPipelineTests from .pipelines.test_pipelines_document_question_answering import DocumentQuestionAnsweringPipelineTests from .pipelines.test_pipelines_feature_extraction import FeatureExtractionPipelineTests from .pipelines.test_pipelines_fill_mask import FillMaskPipelineTests from .pipelines.test_pipelines_image_classification import ImageClassificationPipelineTests from .pipelines.test_pipelines_image_feature_extraction import ImageFeatureExtractionPipelineTests from .pipelines.test_pipelines_image_segmentation import ImageSegmentationPipelineTests from .pipelines.test_pipelines_image_text_to_text import ImageTextToTextPipelineTests from .pipelines.test_pipelines_image_to_image import ImageToImagePipelineTests from .pipelines.test_pipelines_image_to_text import ImageToTextPipelineTests from .pipelines.test_pipelines_mask_generation import MaskGenerationPipelineTests from .pipelines.test_pipelines_object_detection import ObjectDetectionPipelineTests from .pipelines.test_pipelines_question_answering import QAPipelineTests from .pipelines.test_pipelines_summarization import SummarizationPipelineTests from .pipelines.test_pipelines_table_question_answering import TQAPipelineTests from .pipelines.test_pipelines_text2text_generation import Text2TextGenerationPipelineTests from .pipelines.test_pipelines_text_classification import TextClassificationPipelineTests from .pipelines.test_pipelines_text_generation import TextGenerationPipelineTests from .pipelines.test_pipelines_text_to_audio import 
TextToAudioPipelineTests from .pipelines.test_pipelines_token_classification import TokenClassificationPipelineTests from .pipelines.test_pipelines_translation import TranslationPipelineTests from .pipelines.test_pipelines_video_classification import VideoClassificationPipelineTests from .pipelines.test_pipelines_visual_question_answering import VisualQuestionAnsweringPipelineTests from .pipelines.test_pipelines_zero_shot import ZeroShotClassificationPipelineTests from .pipelines.test_pipelines_zero_shot_audio_classification import ZeroShotAudioClassificationPipelineTests from .pipelines.test_pipelines_zero_shot_image_classification import ZeroShotImageClassificationPipelineTests from .pipelines.test_pipelines_zero_shot_object_detection import ZeroShotObjectDetectionPipelineTests pipeline_test_mapping = { "audio-classification": {"test": AudioClassificationPipelineTests}, "automatic-speech-recognition": {"test": AutomaticSpeechRecognitionPipelineTests}, "depth-estimation": {"test": DepthEstimationPipelineTests}, "document-question-answering": {"test": DocumentQuestionAnsweringPipelineTests}, "feature-extraction": {"test": FeatureExtractionPipelineTests}, "fill-mask": {"test": FillMaskPipelineTests}, "image-classification": {"test": ImageClassificationPipelineTests}, "image-feature-extraction": {"test": ImageFeatureExtractionPipelineTests}, "image-segmentation": {"test": ImageSegmentationPipelineTests}, "image-text-to-text": {"test": ImageTextToTextPipelineTests}, "image-to-image": {"test": ImageToImagePipelineTests}, "image-to-text": {"test": ImageToTextPipelineTests}, "mask-generation": {"test": MaskGenerationPipelineTests}, "object-detection": {"test": ObjectDetectionPipelineTests}, "question-answering": {"test": QAPipelineTests}, "summarization": {"test": SummarizationPipelineTests}, "table-question-answering": {"test": TQAPipelineTests}, "text2text-generation": {"test": Text2TextGenerationPipelineTests}, "text-classification": {"test": TextClassificationPipelineTests}, "text-generation": {"test": TextGenerationPipelineTests}, "text-to-audio": {"test": TextToAudioPipelineTests}, "token-classification": {"test": TokenClassificationPipelineTests}, "translation": {"test": TranslationPipelineTests}, "video-classification": {"test": VideoClassificationPipelineTests}, "visual-question-answering": {"test": VisualQuestionAnsweringPipelineTests}, "zero-shot": {"test": ZeroShotClassificationPipelineTests}, "zero-shot-audio-classification": {"test": ZeroShotAudioClassificationPipelineTests}, "zero-shot-image-classification": {"test": ZeroShotImageClassificationPipelineTests}, "zero-shot-object-detection": {"test": ZeroShotObjectDetectionPipelineTests}, } task_to_pipeline_and_spec_mapping = { # Adding a task to this list will cause its pipeline input signature to be checked against the corresponding # task spec in the HF Hub "audio-classification": (AudioClassificationPipeline, AudioClassificationInput), "automatic-speech-recognition": (AutomaticSpeechRecognitionPipeline, AutomaticSpeechRecognitionInput), "depth-estimation": (DepthEstimationPipeline, DepthEstimationInput), "image-classification": (ImageClassificationPipeline, ImageClassificationInput), "image-segmentation": (ImageSegmentationPipeline, ImageSegmentationInput), "image-to-text": (ImageToTextPipeline, ImageToTextInput), "object-detection": (ObjectDetectionPipeline, ObjectDetectionInput), "question-answering": (QuestionAnsweringPipeline, QuestionAnsweringInput), "video-classification": (VideoClassificationPipeline, 
VideoClassificationInput), "zero-shot-image-classification": (ZeroShotImageClassificationPipeline, ZeroShotImageClassificationInput), } for task, task_info in pipeline_test_mapping.items(): test = task_info["test"] task_info["mapping"] = { "pt": getattr(test, "model_mapping", None), "tf": getattr(test, "tf_model_mapping", None), } # The default value `hf-internal-testing` is for running the pipeline testing against the tiny models on the Hub. # For debugging purpose, we can specify a local path which is the `output_path` argument of a previous run of # `utils/create_dummy_models.py`. TRANSFORMERS_TINY_MODEL_PATH = os.environ.get("TRANSFORMERS_TINY_MODEL_PATH", "hf-internal-testing") if TRANSFORMERS_TINY_MODEL_PATH == "hf-internal-testing": TINY_MODEL_SUMMARY_FILE_PATH = os.path.join(Path(__file__).parent.parent, "tests/utils/tiny_model_summary.json") else: TINY_MODEL_SUMMARY_FILE_PATH = os.path.join(TRANSFORMERS_TINY_MODEL_PATH, "reports", "tiny_model_summary.json") with open(TINY_MODEL_SUMMARY_FILE_PATH) as fp: tiny_model_summary = json.load(fp) PATH_TO_TRANSFORMERS = os.path.join(Path(__file__).parent.parent, "src/transformers") # Dynamically import the Transformers module to grab the attribute classes of the processor form their names. transformers_module = direct_transformers_import(PATH_TO_TRANSFORMERS) logger = logging.get_logger(__name__) class PipelineTesterMixin: model_tester = None pipeline_model_mapping = None supported_frameworks = ["pt", "tf"] def run_task_tests(self, task, torch_dtype="float32"): """Run pipeline tests for a specific `task` Args: task (`str`): A task name. This should be a key in the mapping `pipeline_test_mapping`. torch_dtype (`str`, `optional`, defaults to `'float32'`): The torch dtype to use for the model. Can be used for FP16/other precision inference. """ if task not in self.pipeline_model_mapping: self.skipTest( f"{self.__class__.__name__}::test_pipeline_{task.replace('-', '_')}_{torch_dtype} is skipped: `{task}` is not in " f"`self.pipeline_model_mapping` for `{self.__class__.__name__}`." ) model_architectures = self.pipeline_model_mapping[task] if not isinstance(model_architectures, tuple): model_architectures = (model_architectures,) # We are going to run tests for multiple model architectures, some of them might be skipped # with this flag we are control if at least one model were tested or all were skipped at_least_one_model_is_tested = False for model_architecture in model_architectures: model_arch_name = model_architecture.__name__ model_type = model_architecture.config_class.model_type # Get the canonical name for _prefix in ["Flax", "TF"]: if model_arch_name.startswith(_prefix): model_arch_name = model_arch_name[len(_prefix) :] break if model_arch_name not in tiny_model_summary: continue tokenizer_names = tiny_model_summary[model_arch_name]["tokenizer_classes"] # Sort image processors and feature extractors from tiny-models json file image_processor_names = [] feature_extractor_names = [] processor_classes = tiny_model_summary[model_arch_name]["processor_classes"] for cls_name in processor_classes: if "ImageProcessor" in cls_name: image_processor_names.append(cls_name) elif "FeatureExtractor" in cls_name: feature_extractor_names.append(cls_name) # Processor classes are not in tiny models JSON file, so extract them from the mapping # processors are mapped to instance, e.g. 
"XxxProcessor" processor_names = PROCESSOR_MAPPING_NAMES.get(model_type, None) if not isinstance(processor_names, (list, tuple)): processor_names = [processor_names] commit = None if model_arch_name in tiny_model_summary and "sha" in tiny_model_summary[model_arch_name]: commit = tiny_model_summary[model_arch_name]["sha"] repo_name = f"tiny-random-{model_arch_name}" if TRANSFORMERS_TINY_MODEL_PATH != "hf-internal-testing": repo_name = model_arch_name self.run_model_pipeline_tests( task, repo_name, model_architecture, tokenizer_names=tokenizer_names, image_processor_names=image_processor_names, feature_extractor_names=feature_extractor_names, processor_names=processor_names, commit=commit, torch_dtype=torch_dtype, ) at_least_one_model_is_tested = True if task in task_to_pipeline_and_spec_mapping: pipeline, hub_spec = task_to_pipeline_and_spec_mapping[task] compare_pipeline_args_to_hub_spec(pipeline, hub_spec) if not at_least_one_model_is_tested: self.skipTest( f"{self.__class__.__name__}::test_pipeline_{task.replace('-', '_')}_{torch_dtype} is skipped: Could not find any " f"model architecture in the tiny models JSON file for `{task}`." ) def run_model_pipeline_tests( self, task, repo_name, model_architecture, tokenizer_names, image_processor_names, feature_extractor_names, processor_names, commit, torch_dtype="float32", ): """Run pipeline tests for a specific `task` with the give model class and tokenizer/processor class names Args: task (`str`): A task name. This should be a key in the mapping `pipeline_test_mapping`. repo_name (`str`): A model repository id on the Hub. model_architecture (`type`): A subclass of `PretrainedModel` or `PretrainedModel`. tokenizer_names (`List[str]`): A list of names of a subclasses of `PreTrainedTokenizerFast` or `PreTrainedTokenizer`. image_processor_names (`List[str]`): A list of names of subclasses of `BaseImageProcessor`. feature_extractor_names (`List[str]`): A list of names of subclasses of `FeatureExtractionMixin`. processor_names (`List[str]`): A list of names of subclasses of `ProcessorMixin`. commit (`str`): The commit hash of the model repository on the Hub. torch_dtype (`str`, `optional`, defaults to `'float32'`): The torch dtype to use for the model. Can be used for FP16/other precision inference. """ # Get an instance of the corresponding class `XXXPipelineTests` in order to use `get_test_pipeline` and # `run_pipeline_test`. 
pipeline_test_class_name = pipeline_test_mapping[task]["test"].__name__ # If no image processor or feature extractor is found, we still need to test the pipeline with None # otherwise for any empty list we might skip all the tests tokenizer_names = tokenizer_names or [None] image_processor_names = image_processor_names or [None] feature_extractor_names = feature_extractor_names or [None] processor_names = processor_names or [None] test_cases = [ { "tokenizer_name": tokenizer_name, "image_processor_name": image_processor_name, "feature_extractor_name": feature_extractor_name, "processor_name": processor_name, } for tokenizer_name in tokenizer_names for image_processor_name in image_processor_names for feature_extractor_name in feature_extractor_names for processor_name in processor_names ] for test_case in test_cases: tokenizer_name = test_case["tokenizer_name"] image_processor_name = test_case["image_processor_name"] feature_extractor_name = test_case["feature_extractor_name"] processor_name = test_case["processor_name"] do_skip_test_case = self.is_pipeline_test_to_skip( pipeline_test_class_name, model_architecture.config_class, model_architecture, tokenizer_name, image_processor_name, feature_extractor_name, processor_name, ) if do_skip_test_case: logger.warning( f"{self.__class__.__name__}::test_pipeline_{task.replace('-', '_')}_{torch_dtype} is skipped: test is " f"currently known to fail for: model `{model_architecture.__name__}` | tokenizer " f"`{tokenizer_name}` | image processor `{image_processor_name}` | feature extractor {feature_extractor_name}." ) continue self.run_pipeline_test( task, repo_name, model_architecture, tokenizer_name=tokenizer_name, image_processor_name=image_processor_name, feature_extractor_name=feature_extractor_name, processor_name=processor_name, commit=commit, torch_dtype=torch_dtype, ) def run_pipeline_test( self, task, repo_name, model_architecture, tokenizer_name, image_processor_name, feature_extractor_name, processor_name, commit, torch_dtype="float32", ): """Run pipeline tests for a specific `task` with the give model class and tokenizer/processor class name The model will be loaded from a model repository on the Hub. Args: task (`str`): A task name. This should be a key in the mapping `pipeline_test_mapping`. repo_name (`str`): A model repository id on the Hub. model_architecture (`type`): A subclass of `PretrainedModel` or `PretrainedModel`. tokenizer_name (`str`): The name of a subclass of `PreTrainedTokenizerFast` or `PreTrainedTokenizer`. image_processor_name (`str`): The name of a subclass of `BaseImageProcessor`. feature_extractor_name (`str`): The name of a subclass of `FeatureExtractionMixin`. processor_name (`str`): The name of a subclass of `ProcessorMixin`. commit (`str`): The commit hash of the model repository on the Hub. torch_dtype (`str`, `optional`, defaults to `'float32'`): The torch dtype to use for the model. Can be used for FP16/other precision inference. """ repo_id = f"{TRANSFORMERS_TINY_MODEL_PATH}/{repo_name}" model_type = model_architecture.config_class.model_type if TRANSFORMERS_TINY_MODEL_PATH != "hf-internal-testing": repo_id = os.path.join(TRANSFORMERS_TINY_MODEL_PATH, model_type, repo_name) # -------------------- Load model -------------------- # TODO: We should check if a model file is on the Hub repo. instead. 
try: model = model_architecture.from_pretrained(repo_id, revision=commit) except Exception: logger.warning( f"{self.__class__.__name__}::test_pipeline_{task.replace('-', '_')}_{torch_dtype} is skipped: Could not find or load " f"the model from `{repo_id}` with `{model_architecture}`." ) self.skipTest(f"Could not find or load the model from {repo_id} with {model_architecture}.") # -------------------- Load tokenizer -------------------- tokenizer = None if tokenizer_name is not None: tokenizer_class = getattr(transformers_module, tokenizer_name) tokenizer = tokenizer_class.from_pretrained(repo_id, revision=commit) # -------------------- Load processors -------------------- processors = {} for key, name in zip( ["image_processor", "feature_extractor", "processor"], [image_processor_name, feature_extractor_name, processor_name], ): if name is not None: try: # Can fail if some extra dependencies are not installed processor_class = getattr(transformers_module, name) processor = processor_class.from_pretrained(repo_id, revision=commit) processors[key] = processor except Exception: logger.warning( f"{self.__class__.__name__}::test_pipeline_{task.replace('-', '_')}_{torch_dtype} is skipped: " f"Could not load the {key} from `{repo_id}` with `{name}`." ) self.skipTest(f"Could not load the {key} from {repo_id} with {name}.") # --------------------------------------------------------- # TODO: Maybe not upload such problematic tiny models to Hub. if tokenizer is None and "image_processor" not in processors and "feature_extractor" not in processors: logger.warning( f"{self.__class__.__name__}::test_pipeline_{task.replace('-', '_')}_{torch_dtype} is skipped: Could not find or load " f"any tokenizer / image processor / feature extractor from `{repo_id}`." ) self.skipTest(f"Could not find or load any tokenizer / processor from {repo_id}.") pipeline_test_class_name = pipeline_test_mapping[task]["test"].__name__ if self.is_pipeline_test_to_skip_more(pipeline_test_class_name, model.config, model, tokenizer, **processors): logger.warning( f"{self.__class__.__name__}::test_pipeline_{task.replace('-', '_')}_{torch_dtype} is skipped: test is " f"currently known to fail for: model `{model_architecture.__name__}` | tokenizer " f"`{tokenizer_name}` | image processor `{image_processor_name}` | feature extractor `{feature_extractor_name}`." ) self.skipTest( f"Test is known to fail for: model `{model_architecture.__name__}` | tokenizer `{tokenizer_name}` " f"| image processor `{image_processor_name}` | feature extractor `{feature_extractor_name}`." ) # validate validate_test_components(model, tokenizer) if hasattr(model, "eval"): model = model.eval() # Get an instance of the corresponding class `XXXPipelineTests` in order to use `get_test_pipeline` and # `run_pipeline_test`. task_test = pipeline_test_mapping[task]["test"]() pipeline, examples = task_test.get_test_pipeline(model, tokenizer, **processors, torch_dtype=torch_dtype) if pipeline is None: # The test can disable itself, but it should be very marginal # Concerns: Wav2Vec2ForCTC without tokenizer test (FastTokenizer don't exist) logger.warning( f"{self.__class__.__name__}::test_pipeline_{task.replace('-', '_')}_{torch_dtype} is skipped: Could not get the " "pipeline for testing." 
) self.skipTest(reason="Could not get the pipeline for testing.") task_test.run_pipeline_test(pipeline, examples) def run_batch_test(pipeline, examples): # Need to copy because `Conversation` are stateful if pipeline.tokenizer is not None and pipeline.tokenizer.pad_token_id is None: return # No batching for this and it's OK # 10 examples with batch size 4 means there needs to be a unfinished batch # which is important for the unbatcher def data(n): for _ in range(n): # Need to copy because Conversation object is mutated yield copy.deepcopy(random.choice(examples)) out = [] for item in pipeline(data(10), batch_size=4): out.append(item) self.assertEqual(len(out), 10) run_batch_test(pipeline, examples) @is_pipeline_test def test_pipeline_audio_classification(self): self.run_task_tests(task="audio-classification") @is_pipeline_test @require_torch def test_pipeline_audio_classification_fp16(self): self.run_task_tests(task="audio-classification", torch_dtype="float16") @is_pipeline_test def test_pipeline_automatic_speech_recognition(self): self.run_task_tests(task="automatic-speech-recognition") @is_pipeline_test @require_torch def test_pipeline_automatic_speech_recognition_fp16(self): self.run_task_tests(task="automatic-speech-recognition", torch_dtype="float16") @is_pipeline_test @require_vision @require_timm @require_torch def test_pipeline_depth_estimation(self): self.run_task_tests(task="depth-estimation") @is_pipeline_test @require_vision @require_timm @require_torch def test_pipeline_depth_estimation_fp16(self): self.run_task_tests(task="depth-estimation", torch_dtype="float16") @is_pipeline_test @require_pytesseract @require_torch @require_vision def test_pipeline_document_question_answering(self): self.run_task_tests(task="document-question-answering") @is_pipeline_test @require_pytesseract @require_torch @require_vision def test_pipeline_document_question_answering_fp16(self): self.run_task_tests(task="document-question-answering", torch_dtype="float16") @is_pipeline_test def test_pipeline_feature_extraction(self): self.run_task_tests(task="feature-extraction") @is_pipeline_test @require_torch def test_pipeline_feature_extraction_fp16(self): self.run_task_tests(task="feature-extraction", torch_dtype="float16") @is_pipeline_test def test_pipeline_fill_mask(self): self.run_task_tests(task="fill-mask") @is_pipeline_test @require_torch def test_pipeline_fill_mask_fp16(self): self.run_task_tests(task="fill-mask", torch_dtype="float16") @is_pipeline_test @require_torch_or_tf @require_vision def test_pipeline_image_classification(self): self.run_task_tests(task="image-classification") @is_pipeline_test @require_vision @require_torch def test_pipeline_image_classification_fp16(self): self.run_task_tests(task="image-classification", torch_dtype="float16") @is_pipeline_test @require_vision @require_timm @require_torch def test_pipeline_image_segmentation(self): self.run_task_tests(task="image-segmentation") @is_pipeline_test @require_vision @require_timm @require_torch def test_pipeline_image_segmentation_fp16(self): self.run_task_tests(task="image-segmentation", torch_dtype="float16") @is_pipeline_test @require_vision @require_torch def test_pipeline_image_text_to_text(self): self.run_task_tests(task="image-text-to-text") @is_pipeline_test @require_vision @require_torch def test_pipeline_image_text_to_text_fp16(self): self.run_task_tests(task="image-text-to-text", torch_dtype="float16") @is_pipeline_test @require_vision def test_pipeline_image_to_text(self): 
self.run_task_tests(task="image-to-text") @is_pipeline_test @require_vision @require_torch def test_pipeline_image_to_text_fp16(self): self.run_task_tests(task="image-to-text", torch_dtype="float16") @is_pipeline_test @require_timm @require_vision @require_torch def test_pipeline_image_feature_extraction(self): self.run_task_tests(task="image-feature-extraction") @is_pipeline_test @require_timm @require_vision @require_torch def test_pipeline_image_feature_extraction_fp16(self): self.run_task_tests(task="image-feature-extraction", torch_dtype="float16") @unittest.skip(reason="`run_pipeline_test` is currently not implemented.") @is_pipeline_test @require_vision @require_torch def test_pipeline_mask_generation(self): self.run_task_tests(task="mask-generation") @unittest.skip(reason="`run_pipeline_test` is currently not implemented.") @is_pipeline_test @require_vision @require_torch def test_pipeline_mask_generation_fp16(self): self.run_task_tests(task="mask-generation", torch_dtype="float16") @is_pipeline_test @require_vision @require_timm @require_torch def test_pipeline_object_detection(self): self.run_task_tests(task="object-detection") @is_pipeline_test @require_vision @require_timm @require_torch def test_pipeline_object_detection_fp16(self): self.run_task_tests(task="object-detection", torch_dtype="float16") @is_pipeline_test def test_pipeline_question_answering(self): self.run_task_tests(task="question-answering") @is_pipeline_test @require_torch def test_pipeline_question_answering_fp16(self): self.run_task_tests(task="question-answering", torch_dtype="float16") @is_pipeline_test def test_pipeline_summarization(self): self.run_task_tests(task="summarization") @is_pipeline_test @require_torch def test_pipeline_summarization_fp16(self): self.run_task_tests(task="summarization", torch_dtype="float16") @is_pipeline_test def test_pipeline_table_question_answering(self): self.run_task_tests(task="table-question-answering") @is_pipeline_test @require_torch def test_pipeline_table_question_answering_fp16(self): self.run_task_tests(task="table-question-answering", torch_dtype="float16") @is_pipeline_test def test_pipeline_text2text_generation(self): self.run_task_tests(task="text2text-generation") @is_pipeline_test @require_torch def test_pipeline_text2text_generation_fp16(self): self.run_task_tests(task="text2text-generation", torch_dtype="float16") @is_pipeline_test def test_pipeline_text_classification(self): self.run_task_tests(task="text-classification") @is_pipeline_test @require_torch def test_pipeline_text_classification_fp16(self): self.run_task_tests(task="text-classification", torch_dtype="float16") @is_pipeline_test @require_torch_or_tf def test_pipeline_text_generation(self): self.run_task_tests(task="text-generation") @is_pipeline_test @require_torch def test_pipeline_text_generation_fp16(self): self.run_task_tests(task="text-generation", torch_dtype="float16") @is_pipeline_test @require_torch def test_pipeline_text_to_audio(self): self.run_task_tests(task="text-to-audio") @is_pipeline_test @require_torch def test_pipeline_text_to_audio_fp16(self): self.run_task_tests(task="text-to-audio", torch_dtype="float16") @is_pipeline_test def test_pipeline_token_classification(self): self.run_task_tests(task="token-classification") @is_pipeline_test @require_torch def test_pipeline_token_classification_fp16(self): self.run_task_tests(task="token-classification", torch_dtype="float16") @is_pipeline_test def test_pipeline_translation(self): self.run_task_tests(task="translation") 
@is_pipeline_test @require_torch def test_pipeline_translation_fp16(self): self.run_task_tests(task="translation", torch_dtype="float16") @is_pipeline_test @require_torch_or_tf @require_vision @require_av def test_pipeline_video_classification(self): self.run_task_tests(task="video-classification") @is_pipeline_test @require_vision @require_torch @require_av def test_pipeline_video_classification_fp16(self): self.run_task_tests(task="video-classification", torch_dtype="float16") @is_pipeline_test @require_torch @require_vision def test_pipeline_visual_question_answering(self): self.run_task_tests(task="visual-question-answering") @is_pipeline_test @require_torch @require_vision def test_pipeline_visual_question_answering_fp16(self): self.run_task_tests(task="visual-question-answering", torch_dtype="float16") @is_pipeline_test def test_pipeline_zero_shot(self): self.run_task_tests(task="zero-shot") @is_pipeline_test @require_torch def test_pipeline_zero_shot_fp16(self): self.run_task_tests(task="zero-shot", torch_dtype="float16") @is_pipeline_test @require_torch def test_pipeline_zero_shot_audio_classification(self): self.run_task_tests(task="zero-shot-audio-classification") @is_pipeline_test @require_torch def test_pipeline_zero_shot_audio_classification_fp16(self): self.run_task_tests(task="zero-shot-audio-classification", torch_dtype="float16") @is_pipeline_test @require_vision def test_pipeline_zero_shot_image_classification(self): self.run_task_tests(task="zero-shot-image-classification") @is_pipeline_test @require_vision @require_torch def test_pipeline_zero_shot_image_classification_fp16(self): self.run_task_tests(task="zero-shot-image-classification", torch_dtype="float16") @is_pipeline_test @require_vision @require_torch def test_pipeline_zero_shot_object_detection(self): self.run_task_tests(task="zero-shot-object-detection") @is_pipeline_test @require_vision @require_torch def test_pipeline_zero_shot_object_detection_fp16(self): self.run_task_tests(task="zero-shot-object-detection", torch_dtype="float16") # This contains the test cases to be skipped without model architecture being involved. def is_pipeline_test_to_skip( self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, image_processor_name, feature_extractor_name, processor_name, ): """Skip some tests based on the classes or their names without the instantiated objects. This is to avoid calling `from_pretrained` (so reducing the runtime) if we already know the tests will fail. """ # No fix is required for this case. if ( pipeline_test_case_name == "DocumentQuestionAnsweringPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast") ): # `DocumentQuestionAnsweringPipelineTests` requires a fast tokenizer. return True return False def is_pipeline_test_to_skip_more( self, pipeline_test_case_name, config, model, tokenizer, image_processor=None, feature_extractor=None, processor=None, ): # noqa """Skip some more tests based on the information from the instantiated objects.""" # No fix is required for this case. if ( pipeline_test_case_name == "QAPipelineTests" and tokenizer is not None and getattr(tokenizer, "pad_token", None) is None and not tokenizer.__class__.__name__.endswith("Fast") ): # `QAPipelineTests` doesn't work with a slow tokenizer that has no pad token. return True return False def validate_test_components(model, tokenizer): # TODO: Move this to tiny model creation script # head-specific (within a model type) necessary changes to the config # 1. 
for `BlenderbotForCausalLM` if model.__class__.__name__ == "BlenderbotForCausalLM": model.config.encoder_no_repeat_ngram_size = 0 # TODO: Change the tiny model creation script: don't create models with problematic tokenizers # Avoid `IndexError` in embedding layers CONFIG_WITHOUT_VOCAB_SIZE = ["CanineConfig"] if tokenizer is not None: # Removing `decoder=True` in `get_text_config` can lead to conflicting values e.g. in MusicGen config_vocab_size = getattr(model.config.get_text_config(decoder=True), "vocab_size", None) # For CLIP-like models if config_vocab_size is None: if hasattr(model.config, "text_encoder"): config_vocab_size = getattr(model.config.text_config, "vocab_size", None) if config_vocab_size is None and model.config.__class__.__name__ not in CONFIG_WITHOUT_VOCAB_SIZE: raise ValueError( "Could not determine `vocab_size` from model configuration while `tokenizer` is not `None`." ) def get_arg_names_from_hub_spec(hub_spec, first_level=True): # This util is used in pipeline tests, to verify that a pipeline's documented arguments # match the Hub specification for that task arg_names = [] for field in fields(hub_spec): # Recurse into nested fields, but max one level if is_dataclass(field.type): arg_names.extend([field.name for field in fields(field.type)]) continue # Next, catch nested fields that are part of a Union[], which is usually caused by Optional[] for param_type in get_args(field.type): if is_dataclass(param_type): # Again, recurse into nested fields, but max one level arg_names.extend([field.name for field in fields(param_type)]) break else: # Finally, this line triggers if it's not a nested field arg_names.append(field.name) return arg_names def parse_args_from_docstring_by_indentation(docstring): # This util is used in pipeline tests, to extract the argument names from a google-format docstring # to compare them against the Hub specification for that task. It uses indentation levels as a primary # source of truth, so these have to be correct! 
docstring = dedent(docstring) lines_by_indent = [ (len(line) - len(line.lstrip()), line.strip()) for line in docstring.split("\n") if line.strip() ] args_lineno = None args_indent = None args_end = None for lineno, (indent, line) in enumerate(lines_by_indent): if line == "Args:": args_lineno = lineno args_indent = indent continue elif args_lineno is not None and indent == args_indent: args_end = lineno break if args_lineno is None: raise ValueError("No args block to parse!") elif args_end is None: args_block = lines_by_indent[args_lineno + 1 :] else: args_block = lines_by_indent[args_lineno + 1 : args_end] outer_indent_level = min(line[0] for line in args_block) outer_lines = [line for line in args_block if line[0] == outer_indent_level] arg_names = [re.match(r"(\w+)\W", line[1]).group(1) for line in outer_lines] return arg_names def compare_pipeline_args_to_hub_spec(pipeline_class, hub_spec): ALLOWED_TRANSFORMERS_ONLY_ARGS = ["timeout"] docstring = inspect.getdoc(pipeline_class.__call__).strip() docstring_args = set(parse_args_from_docstring_by_indentation(docstring)) hub_args = set(get_arg_names_from_hub_spec(hub_spec)) # Special casing: We allow the name of this arg to differ js_generate_args = [js_arg for js_arg in hub_args if js_arg.startswith("generate")] docstring_generate_args = [ docstring_arg for docstring_arg in docstring_args if docstring_arg.startswith("generate") ] if ( len(js_generate_args) == 1 and len(docstring_generate_args) == 1 and js_generate_args != docstring_generate_args ): hub_args.remove(js_generate_args[0]) docstring_args.remove(docstring_generate_args[0]) # Special casing 2: We permit some transformers-only arguments that don't affect pipeline output for arg in ALLOWED_TRANSFORMERS_ONLY_ARGS: if arg in docstring_args and arg not in hub_args: docstring_args.remove(arg) if hub_args != docstring_args: error = [f"{pipeline_class.__name__} differs from JS spec {hub_spec.__name__}"] matching_args = hub_args & docstring_args huggingface_hub_only = hub_args - docstring_args transformers_only = docstring_args - hub_args if matching_args: error.append(f"Matching args: {matching_args}") if huggingface_hub_only: error.append(f"Huggingface Hub only: {huggingface_hub_only}") if transformers_only: error.append(f"Transformers only: {transformers_only}") raise ValueError("\n".join(error))
transformers/tests/test_pipeline_mixin.py
{ "file_path": "transformers/tests/test_pipeline_mixin.py", "repo_id": "transformers", "token_count": 16841 }
# coding=utf-8 # Copyright 2018 the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import unittest import numpy as np from transformers.data.data_collator import default_data_collator from transformers.testing_utils import require_accelerate, require_torch from transformers.trainer_utils import RemoveColumnsCollator, find_executable_batch_size from transformers.utils import is_torch_available if is_torch_available(): import torch from torch import nn from torch.utils.data import IterableDataset from transformers.modeling_outputs import SequenceClassifierOutput from transformers.tokenization_utils_base import BatchEncoding from transformers.trainer_pt_utils import ( DistributedLengthGroupedSampler, DistributedSamplerWithLoop, DistributedTensorGatherer, EvalLoopContainer, IterableDatasetShard, LabelSmoother, LengthGroupedSampler, SequentialDistributedSampler, ShardSampler, get_parameter_names, numpy_pad_and_concatenate, torch_pad_and_concatenate, ) class TstLayer(nn.Module): def __init__(self, hidden_size): super().__init__() self.linear1 = nn.Linear(hidden_size, hidden_size) self.ln1 = nn.LayerNorm(hidden_size) self.linear2 = nn.Linear(hidden_size, hidden_size) self.ln2 = nn.LayerNorm(hidden_size) self.bias = nn.Parameter(torch.zeros(hidden_size)) def forward(self, x): h = self.ln1(nn.functional.relu(self.linear1(x))) h = nn.functional.relu(self.linear2(x)) return self.ln2(x + h + self.bias) class RandomIterableDataset(IterableDataset): # For testing, an iterable dataset of random length def __init__(self, p_stop=0.01, max_length=1000): self.p_stop = p_stop self.max_length = max_length self.generator = torch.Generator() def __iter__(self): count = 0 stop = False while not stop and count < self.max_length: yield count count += 1 number = torch.rand(1, generator=self.generator).item() stop = number < self.p_stop @require_torch class TrainerUtilsTest(unittest.TestCase): def test_distributed_tensor_gatherer(self): # Simulate a result with a dataset of size 21, 4 processes and chunks of lengths 2, 3, 1 world_size = 4 num_samples = 21 input_indices = [ [0, 1, 6, 7, 12, 13, 18, 19], [2, 3, 4, 8, 9, 10, 14, 15, 16, 20, 0, 1], [5, 11, 17, 2], ] predictions = np.random.normal(size=(num_samples, 13)) gatherer = DistributedTensorGatherer(world_size=world_size, num_samples=num_samples) for indices in input_indices: gatherer.add_arrays(predictions[indices]) result = gatherer.finalize() self.assertTrue(np.array_equal(result, predictions)) # With nested tensors gatherer = DistributedTensorGatherer(world_size=world_size, num_samples=num_samples) for indices in input_indices: gatherer.add_arrays([predictions[indices], [predictions[indices], predictions[indices]]]) result = gatherer.finalize() self.assertTrue(isinstance(result, list)) self.assertEqual(len(result), 2) self.assertTrue(isinstance(result[1], list)) self.assertEqual(len(result[1]), 2) self.assertTrue(np.array_equal(result[0], predictions)) self.assertTrue(np.array_equal(result[1][0], predictions)) 
self.assertTrue(np.array_equal(result[1][1], predictions)) def test_distributed_tensor_gatherer_different_shapes(self): # Simulate a result with a dataset of size 21, 4 processes and chunks of lengths 2, 3, 1 world_size = 4 num_samples = 21 input_indices = [ [0, 1, 6, 7, 12, 13, 18, 19], [2, 3, 4, 8, 9, 10, 14, 15, 16, 20, 0, 1], [5, 11, 17, 2], ] sequence_lengths = [8, 10, 13] predictions = np.random.normal(size=(num_samples, 13)) gatherer = DistributedTensorGatherer(world_size=world_size, num_samples=num_samples) for indices, seq_length in zip(input_indices, sequence_lengths): gatherer.add_arrays(predictions[indices, :seq_length]) result = gatherer.finalize() # Remove the extra samples added at the end for a round multiple of num processes. actual_indices = [input_indices[0], input_indices[1][:-2], input_indices[2][:-1]] for indices, seq_length in zip(actual_indices, sequence_lengths): self.assertTrue(np.array_equal(result[indices, :seq_length], predictions[indices, :seq_length])) # With nested tensors predictions = np.random.normal(size=(num_samples, 13)) gatherer = DistributedTensorGatherer(world_size=world_size, num_samples=num_samples) for indices, seq_length in zip(input_indices, sequence_lengths): gatherer.add_arrays([predictions[indices, :seq_length], predictions[indices]]) result = gatherer.finalize() for indices, seq_length in zip(actual_indices, sequence_lengths): self.assertTrue(np.array_equal(result[0][indices, :seq_length], predictions[indices, :seq_length])) self.assertTrue(np.array_equal(result[1], predictions)) # Check if works if varying seq_length is second gatherer = DistributedTensorGatherer(world_size=world_size, num_samples=num_samples) for indices, seq_length in zip(input_indices, sequence_lengths): gatherer.add_arrays([predictions[indices], predictions[indices, :seq_length]]) result = gatherer.finalize() self.assertTrue(np.array_equal(result[0], predictions)) for indices, seq_length in zip(actual_indices, sequence_lengths): self.assertTrue(np.array_equal(result[1][indices, :seq_length], predictions[indices, :seq_length])) def test_label_smoothing(self): epsilon = 0.1 num_labels = 12 random_logits = torch.randn(4, 5, num_labels) random_labels = torch.randint(0, num_labels, (4, 5)) loss = nn.functional.cross_entropy(random_logits.view(-1, num_labels), random_labels.view(-1)) model_output = SequenceClassifierOutput(logits=random_logits) label_smoothed_loss = LabelSmoother(0.1)(model_output, random_labels) log_probs = -nn.functional.log_softmax(random_logits, dim=-1) expected_loss = (1 - epsilon) * loss + epsilon * log_probs.mean() torch.testing.assert_close(label_smoothed_loss, expected_loss) # With a few -100 labels random_labels[0, 1] = -100 random_labels[2, 1] = -100 random_labels[2, 3] = -100 loss = nn.functional.cross_entropy(random_logits.view(-1, num_labels), random_labels.view(-1)) model_output = SequenceClassifierOutput(logits=random_logits) label_smoothed_loss = LabelSmoother(0.1)(model_output, random_labels) log_probs = -nn.functional.log_softmax(random_logits, dim=-1) # Mask the log probs with the -100 labels log_probs[0, 1] = 0.0 log_probs[2, 1] = 0.0 log_probs[2, 3] = 0.0 expected_loss = (1 - epsilon) * loss + epsilon * log_probs.sum() / (num_labels * 17) torch.testing.assert_close(label_smoothed_loss, expected_loss) def test_group_by_length(self): # Get some inputs of random lengths lengths = torch.randint(0, 25, (100,)).tolist() # Put one bigger than the others to check it ends up in first position lengths[32] = 50 indices = 
list(LengthGroupedSampler(4, lengths=lengths)) # The biggest element should be first self.assertEqual(lengths[indices[0]], 50) # The indices should be a permutation of range(100) self.assertEqual(sorted(indices), list(range(100))) def test_group_by_length_with_dict(self): # Get some inputs of random lengths data = [] for _ in range(6): input_ids = torch.randint(0, 25, (100,)).tolist() data.append({"input_ids": input_ids}) # Put one bigger than the others to check it ends up in first position data[3]["input_ids"] = torch.randint(0, 25, (105,)).tolist() indices = list(LengthGroupedSampler(4, dataset=data)) # The biggest element should be first self.assertEqual(len(data[indices[0]]["input_ids"]), 105) # The indices should be a permutation of range(6) self.assertEqual(sorted(indices), list(range(6))) def test_group_by_length_with_batch_encoding(self): # Get some inputs of random lengths data = [] for _ in range(6): input_ids = torch.randint(0, 25, (100,)).tolist() data.append(BatchEncoding({"input_ids": input_ids})) # Put one bigger than the others to check it ends up in first position data[3]["input_ids"] = torch.randint(0, 25, (105,)).tolist() indices = list(LengthGroupedSampler(4, dataset=data)) # The biggest element should be first self.assertEqual(len(data[indices[0]]["input_ids"]), 105) # The indices should be a permutation of range(6) self.assertEqual(sorted(indices), list(range(6))) def test_distributed_length_grouped(self): # Get some inputs of random lengths lengths = torch.randint(0, 25, (100,)).tolist() # Put one bigger than the others to check it ends up in first position lengths[32] = 50 indices_process_0 = list(DistributedLengthGroupedSampler(4, num_replicas=2, rank=0, lengths=lengths)) indices_process_1 = list(DistributedLengthGroupedSampler(4, num_replicas=2, rank=1, lengths=lengths)) # The biggest element should be first self.assertEqual(lengths[indices_process_0[0]], 50) # The indices should be a permutation of range(100) self.assertEqual(sorted(indices_process_0 + indices_process_1), list(range(100))) def test_get_parameter_names(self): model = nn.Sequential(TstLayer(128), nn.ModuleList([TstLayer(128), TstLayer(128)])) # fmt: off self.assertEqual( get_parameter_names(model, [nn.LayerNorm]), ['0.linear1.weight', '0.linear1.bias', '0.linear2.weight', '0.linear2.bias', '0.bias', '1.0.linear1.weight', '1.0.linear1.bias', '1.0.linear2.weight', '1.0.linear2.bias', '1.0.bias', '1.1.linear1.weight', '1.1.linear1.bias', '1.1.linear2.weight', '1.1.linear2.bias', '1.1.bias'] ) # fmt: on def test_get_parameter_names_rmsnorm(self): class RMSNorm(nn.Module): def __init__(self, hidden_size): super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.bias = nn.Parameter(torch.zeros(hidden_size)) class ModelWithRMSNorm(nn.Module): def __init__(self): super().__init__() self.linear = nn.Linear(128, 128) self.rmsnorm = RMSNorm(128) self.bias = nn.Parameter(torch.zeros(128)) model = ModelWithRMSNorm() # Test both type-based and name-based filtering decay_parameters = get_parameter_names(model, [], ["bias", "rmsnorm"]) # Parameters that should be in weight decay self.assertIn("linear.weight", decay_parameters) # Parameters that should NOT be in weight decay self.assertNotIn("linear.bias", decay_parameters) self.assertNotIn("rmsnorm.weight", decay_parameters) self.assertNotIn("rmsnorm.bias", decay_parameters) self.assertNotIn("bias", decay_parameters) def test_distributed_sampler_with_loop(self): batch_size = 16 for length in [23, 64, 123]: dataset = list(range(length)) shard1 = 
DistributedSamplerWithLoop(dataset, batch_size, num_replicas=2, rank=0) shard2 = DistributedSamplerWithLoop(dataset, batch_size, num_replicas=2, rank=1) # Set seeds shard1.set_epoch(0) shard2.set_epoch(0) # Sample samples1 = list(shard1) samples2 = list(shard2) self.assertTrue(len(samples1) % batch_size == 0) self.assertTrue(len(samples2) % batch_size == 0) total = [] for sample1, sample2 in zip(samples1, samples2): total += [sample1, sample2] self.assertEqual(set(total[:length]), set(dataset)) self.assertEqual(set(total[length:]), set(total[: (len(total) - length)])) def test_sequential_distributed_sampler(self): batch_size = 16 for length in [23, 64, 123]: dataset = list(range(length)) shard1 = SequentialDistributedSampler(dataset, num_replicas=2, rank=0) shard2 = SequentialDistributedSampler(dataset, num_replicas=2, rank=1) # Sample samples1 = list(shard1) samples2 = list(shard2) total = samples1 + samples2 self.assertListEqual(total[:length], dataset) self.assertListEqual(total[length:], dataset[: (len(total) - length)]) # With a batch_size passed shard1 = SequentialDistributedSampler(dataset, num_replicas=2, rank=0, batch_size=batch_size) shard2 = SequentialDistributedSampler(dataset, num_replicas=2, rank=1, batch_size=batch_size) # Sample samples1 = list(shard1) samples2 = list(shard2) self.assertTrue(len(samples1) % batch_size == 0) self.assertTrue(len(samples2) % batch_size == 0) total = samples1 + samples2 self.assertListEqual(total[:length], dataset) self.assertListEqual(total[length:], dataset[: (len(total) - length)]) def check_iterable_dataset_shard(self, dataset, batch_size, drop_last, num_processes=2, epoch=0): # Set the seed for the base dataset to get the proper reference. dataset.generator.manual_seed(epoch) reference = list(dataset) shards = [ IterableDatasetShard( dataset, batch_size=batch_size, drop_last=drop_last, num_processes=num_processes, process_index=i ) for i in range(num_processes) ] for shard in shards: shard.set_epoch(epoch) shard_lists = [list(shard) for shard in shards] for shard in shard_lists: # All shards have a number of samples that is a round multiple of batch size self.assertTrue(len(shard) % batch_size == 0) # All shards have the same number of samples self.assertEqual(len(shard), len(shard_lists[0])) for shard in shards: # All shards know the total number of samples self.assertEqual(shard.num_examples, len(reference)) observed = [] for idx in range(0, len(shard_lists[0]), batch_size): for shard in shard_lists: observed += shard[idx : idx + batch_size] # If drop_last is False we loop through samples at the beginning to have a size that is a round multiple of # batch_size if not drop_last: while len(reference) < len(observed): reference += reference self.assertListEqual(observed, reference[: len(observed)]) # Check equivalence between IterableDataset and ShardSampler dataset.generator.manual_seed(epoch) reference = list(dataset) sampler_shards = [ ShardSampler( reference, batch_size=batch_size, drop_last=drop_last, num_processes=num_processes, process_index=i ) for i in range(num_processes) ] for shard, sampler_shard in zip(shard_lists, sampler_shards): self.assertListEqual(shard, list(sampler_shard)) def test_iterable_dataset_shard(self): dataset = RandomIterableDataset() self.check_iterable_dataset_shard(dataset, 4, drop_last=True, num_processes=2, epoch=0) self.check_iterable_dataset_shard(dataset, 4, drop_last=False, num_processes=2, epoch=0) self.check_iterable_dataset_shard(dataset, 4, drop_last=True, num_processes=3, epoch=42) 
self.check_iterable_dataset_shard(dataset, 4, drop_last=False, num_processes=3, epoch=42) def test_iterable_dataset_shard_with_length(self): sampler_shards = [ IterableDatasetShard(list(range(100)), batch_size=4, drop_last=True, num_processes=2, process_index=i) for i in range(2) ] # Build expected shards: each process will have batches of size 4 until there is not enough elements to # form two full batches (so we stop at 96 = (100 // (4 * 2)) * 4) expected_shards = [[], []] current_shard = 0 for i in range(0, 96, 4): expected_shards[current_shard].extend(list(range(i, i + 4))) current_shard = 1 - current_shard self.assertListEqual([list(shard) for shard in sampler_shards], expected_shards) self.assertListEqual([len(shard) for shard in sampler_shards], [len(shard) for shard in expected_shards]) sampler_shards = [ IterableDatasetShard(list(range(100)), batch_size=4, drop_last=False, num_processes=2, process_index=i) for i in range(2) ] # When drop_last=False, we get two last full batches by looping back to the beginning. expected_shards[0].extend(list(range(96, 100))) expected_shards[1].extend(list(range(0, 4))) self.assertListEqual([list(shard) for shard in sampler_shards], expected_shards) self.assertListEqual([len(shard) for shard in sampler_shards], [len(shard) for shard in expected_shards]) def check_shard_sampler(self, dataset, batch_size, drop_last, num_processes=2): shards = [ ShardSampler( dataset, batch_size=batch_size, drop_last=drop_last, num_processes=num_processes, process_index=i ) for i in range(num_processes) ] shard_lists = [list(shard) for shard in shards] for shard in shard_lists: # All shards have a number of samples that is a round multiple of batch size self.assertTrue(len(shard) % batch_size == 0) # All shards have the same number of samples self.assertEqual(len(shard), len(shard_lists[0])) observed = [] for idx in range(0, len(shard_lists[0]), batch_size): for shard in shard_lists: observed += shard[idx : idx + batch_size] # If drop_last is False we loop through samples at the beginning to have a size that is a round multiple of # batch_size reference = copy.copy(dataset) if not drop_last: while len(reference) < len(observed): reference += reference self.assertListEqual(observed, reference[: len(observed)]) def test_shard_sampler(self): for n_elements in [64, 123]: dataset = list(range(n_elements)) self.check_shard_sampler(dataset, 4, drop_last=True, num_processes=2) self.check_shard_sampler(dataset, 4, drop_last=False, num_processes=2) self.check_shard_sampler(dataset, 4, drop_last=True, num_processes=3) self.check_shard_sampler(dataset, 4, drop_last=False, num_processes=3) @require_accelerate def test_executable_batch_size(self): batch_sizes = [] @find_executable_batch_size(starting_batch_size=64, auto_find_batch_size=True) def mock_training_loop_function(batch_size): nonlocal batch_sizes batch_sizes.append(batch_size) if batch_size > 16: raise RuntimeError("CUDA out of memory.") mock_training_loop_function() self.assertEqual(batch_sizes, [64, 32, 16]) @require_accelerate def test_executable_batch_size_no_search(self): batch_sizes = [] @find_executable_batch_size(starting_batch_size=64, auto_find_batch_size=False) def mock_training_loop_function(batch_size): nonlocal batch_sizes batch_sizes.append(batch_size) mock_training_loop_function() self.assertEqual(batch_sizes, [64]) @require_accelerate def test_executable_batch_size_with_error(self): @find_executable_batch_size(starting_batch_size=64, auto_find_batch_size=False) def mock_training_loop_function(batch_size): 
raise RuntimeError("CUDA out of memory.") with self.assertRaises(RuntimeError) as cm: mock_training_loop_function() self.assertEqual("CUDA out of memory", cm.args[0]) def test_pad_and_concatenate_with_1d(self): """Tests whether pad_and_concatenate works with scalars.""" array1 = 1.0 array2 = 2.0 result = numpy_pad_and_concatenate(array1, array2) self.assertTrue(np.array_equal(np.array([1.0, 2.0]), result)) tensor1 = torch.tensor(1.0) tensor2 = torch.tensor(2.0) result = torch_pad_and_concatenate(tensor1, tensor2) self.assertTrue(torch.equal(result, torch.Tensor([1.0, 2.0]))) def test_remove_columns_collator(self): class MockLogger: def __init__(self) -> None: self.called = 0 def info(self, msg): self.called += 1 self.last_msg = msg data_batch = [ {"col1": 1, "col2": 2, "col3": 3}, {"col1": 1, "col2": 2, "col3": 3}, ] logger = MockLogger() remove_columns_collator = RemoveColumnsCollator( default_data_collator, ["col1", "col2"], logger, "model", "training" ) self.assertNotIn("col3", remove_columns_collator(data_batch)) # check that the logging message is printed out only once remove_columns_collator(data_batch) remove_columns_collator(data_batch) self.assertEqual(logger.called, 1) self.assertIn("col3", logger.last_msg) def test_eval_loop_container(self): batch_1 = [ torch.ones([8, 5]), {"loss": torch.tensor(1.0)}, (torch.ones([8, 2, 3]), torch.ones([8, 2])), ] batch_2 = [ torch.ones([4, 5]), {"loss": torch.tensor(2.0)}, (torch.ones([4, 2, 3]), torch.ones([4, 6])), ] concat_container = EvalLoopContainer(do_nested_concat=True, padding_index=-100) concat_container.add(batch_1) concat_container.add(batch_2) concat_container.to_cpu_and_numpy() arrays = concat_container.get_arrays() # Test two nested batches concatenation self.assertIsInstance(arrays, list) self.assertEqual(len(arrays), 3) self.assertIsInstance(arrays[0], np.ndarray) self.assertEqual(arrays[0].shape, (12, 5)) self.assertIsInstance(arrays[1], dict) self.assertIsInstance(arrays[1]["loss"], np.ndarray) self.assertEqual(arrays[1]["loss"].shape, (2,)) self.assertTrue(np.allclose(arrays[1]["loss"], np.array([1.0, 2.0]))) self.assertIsInstance(arrays[2], tuple) self.assertEqual(len(arrays[2]), 2) self.assertEqual(arrays[2][0].shape, (12, 2, 3)) self.assertEqual(arrays[2][1].shape, (12, 6)) # check that first batch padded with padding index -100 after concatenation self.assertEqual(arrays[2][1][0][2], -100) # Test two batches with no concatenation list_container = EvalLoopContainer(do_nested_concat=False) list_container.add(batch_1) list_container.add(batch_2) list_container.to_cpu_and_numpy() arrays = list_container.get_arrays() self.assertEqual(len(arrays), 2) self.assertIsInstance(arrays, list) np_batch_1, np_batch_2 = arrays self.assertIsInstance(np_batch_1, list) self.assertEqual(len(np_batch_1), 3) self.assertIsInstance(np_batch_1[0], np.ndarray) self.assertIsInstance(np_batch_1[1], dict) self.assertIsInstance(np_batch_1[2], tuple) self.assertEqual(np_batch_1[0].shape, (8, 5)) self.assertEqual(np_batch_1[1]["loss"].shape, ()) self.assertEqual(np_batch_1[2][0].shape, (8, 2, 3)) self.assertEqual(np_batch_1[2][1].shape, (8, 2)) self.assertIsInstance(np_batch_2, list) self.assertEqual(len(np_batch_2), 3) self.assertIsInstance(np_batch_2[0], np.ndarray) self.assertIsInstance(np_batch_2[1], dict) self.assertIsInstance(np_batch_2[2], tuple) self.assertEqual(np_batch_2[0].shape, (4, 5)) self.assertEqual(np_batch_2[1]["loss"].shape, ()) self.assertEqual(np_batch_2[2][0].shape, (4, 2, 3)) self.assertEqual(np_batch_2[2][1].shape, (4, 6)) # 
Test no batches none_arr = EvalLoopContainer(do_nested_concat=True, padding_index=-100).get_arrays() self.assertIsNone(none_arr) none_arr = EvalLoopContainer(do_nested_concat=False).get_arrays() self.assertIsNone(none_arr) # Test one batch concat_container = EvalLoopContainer(do_nested_concat=True, padding_index=-100) concat_container.add(batch_1) arrays = concat_container.get_arrays() self.assertIsInstance(arrays, list) self.assertEqual(len(arrays), 3) self.assertIsInstance(arrays[0], np.ndarray) self.assertEqual(arrays[0].shape, (8, 5)) self.assertIsInstance(arrays[1], dict) self.assertIsInstance(arrays[1]["loss"], np.ndarray) self.assertEqual(arrays[1]["loss"].shape, ()) self.assertTrue(np.allclose(arrays[1]["loss"], np.array([1.0]))) self.assertIsInstance(arrays[2], tuple) self.assertEqual(len(arrays[2]), 2) self.assertEqual(arrays[2][0].shape, (8, 2, 3)) self.assertEqual(arrays[2][1].shape, (8, 2))
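# Minimal illustrative sketch of the retry pattern that the `find_executable_batch_size`
# tests above rely on: run the decorated training function, and when it raises an
# out-of-memory error, halve the batch size and try again. The toy decorator below is a
# simplified, self-contained stand-in written only to show why the expected call sequence
# is [64, 32, 16]; it is not the actual transformers/accelerate implementation.
import functools


def toy_find_executable_batch_size(starting_batch_size=64):
    """Decorator that halves `batch_size` whenever the wrapped function reports an OOM."""

    def decorator(function):
        @functools.wraps(function)
        def wrapper(*args, **kwargs):
            batch_size = starting_batch_size
            while batch_size > 0:
                try:
                    return function(batch_size, *args, **kwargs)
                except RuntimeError as error:
                    if "out of memory" not in str(error).lower():
                        raise  # unrelated errors propagate untouched
                    batch_size //= 2  # halve and retry, mirroring the behavior tested above
            raise RuntimeError("No executable batch size found, even batch_size=1 failed.")

        return wrapper

    return decorator


attempted_batch_sizes = []


@toy_find_executable_batch_size(starting_batch_size=64)
def toy_training_loop(batch_size):
    attempted_batch_sizes.append(batch_size)
    if batch_size > 16:
        raise RuntimeError("CUDA out of memory.")


toy_training_loop()
assert attempted_batch_sizes == [64, 32, 16]  # same sequence test_executable_batch_size expects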
transformers/tests/trainer/test_trainer_utils.py/0
{ "file_path": "transformers/tests/trainer/test_trainer_utils.py", "repo_id": "transformers", "token_count": 12025 }
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import warnings from parameterized import parameterized from transformers import __version__, is_torch_available from transformers.testing_utils import require_torch_gpu from transformers.utils.deprecation import deprecate_kwarg if is_torch_available(): import torch INFINITE_VERSION = "9999.0.0" class DeprecationDecoratorTester(unittest.TestCase): def test_rename_kwarg(self): with warnings.catch_warnings(): warnings.simplefilter("ignore") @deprecate_kwarg("deprecated_name", new_name="new_name", version=INFINITE_VERSION) def dummy_function(new_name=None, other_name=None): return new_name, other_name # Test keyword argument is renamed value, other_value = dummy_function(deprecated_name="old_value") self.assertEqual(value, "old_value") self.assertIsNone(other_value) # Test deprecated keyword argument not passed value, other_value = dummy_function(new_name="new_value") self.assertEqual(value, "new_value") self.assertIsNone(other_value) # Test other keyword argument value, other_value = dummy_function(other_name="other_value") self.assertIsNone(value) self.assertEqual(other_value, "other_value") # Test deprecated and new args are passed, the new one should be returned value, other_value = dummy_function(deprecated_name="old_value", new_name="new_value") self.assertEqual(value, "new_value") self.assertIsNone(other_value) def test_rename_multiple_kwargs(self): with warnings.catch_warnings(): warnings.simplefilter("ignore") @deprecate_kwarg("deprecated_name1", new_name="new_name1", version=INFINITE_VERSION) @deprecate_kwarg("deprecated_name2", new_name="new_name2", version=INFINITE_VERSION) def dummy_function(new_name1=None, new_name2=None, other_name=None): return new_name1, new_name2, other_name # Test keyword argument is renamed value1, value2, other_value = dummy_function(deprecated_name1="old_value1", deprecated_name2="old_value2") self.assertEqual(value1, "old_value1") self.assertEqual(value2, "old_value2") self.assertIsNone(other_value) # Test deprecated keyword argument is not passed value1, value2, other_value = dummy_function(new_name1="new_value1", new_name2="new_value2") self.assertEqual(value1, "new_value1") self.assertEqual(value2, "new_value2") self.assertIsNone(other_value) # Test other keyword argument is passed and correctly returned value1, value2, other_value = dummy_function(other_name="other_value") self.assertIsNone(value1) self.assertIsNone(value2) self.assertEqual(other_value, "other_value") def test_warnings(self): # Test warning is raised for future version @deprecate_kwarg("deprecated_name", new_name="new_name", version=INFINITE_VERSION) def dummy_function(new_name=None, other_name=None): return new_name, other_name with self.assertWarns(FutureWarning): dummy_function(deprecated_name="old_value") # Test warning is not raised for past version, but arg is still renamed @deprecate_kwarg("deprecated_name", new_name="new_name", version="0.0.0") def 
dummy_function(new_name=None, other_name=None): return new_name, other_name with warnings.catch_warnings(record=True) as raised_warnings: warnings.simplefilter("always") value, other_value = dummy_function(deprecated_name="old_value") self.assertEqual(value, "old_value") self.assertIsNone(other_value) self.assertEqual(len(raised_warnings), 0, f"Warning raised: {[w.message for w in raised_warnings]}") # Test warning is raised for future version if warn_if_greater_or_equal_version is set @deprecate_kwarg("deprecated_name", version="0.0.0", warn_if_greater_or_equal_version=True) def dummy_function(deprecated_name=None): return deprecated_name with self.assertWarns(FutureWarning): value = dummy_function(deprecated_name="deprecated_value") self.assertEqual(value, "deprecated_value") # Test arg is not renamed if new_name is not specified, but warning is raised @deprecate_kwarg("deprecated_name", version=INFINITE_VERSION) def dummy_function(deprecated_name=None): return deprecated_name with self.assertWarns(FutureWarning): value = dummy_function(deprecated_name="deprecated_value") self.assertEqual(value, "deprecated_value") def test_raises(self): # Test if deprecated name and new name are both passed and raise_if_both_names is set -> raise error @deprecate_kwarg("deprecated_name", new_name="new_name", version=INFINITE_VERSION, raise_if_both_names=True) def dummy_function(new_name=None, other_name=None): return new_name, other_name with self.assertRaises(ValueError): dummy_function(deprecated_name="old_value", new_name="new_value") # Test for current version == deprecation version @deprecate_kwarg("deprecated_name", version=__version__, raise_if_greater_or_equal_version=True) def dummy_function(deprecated_name=None): return deprecated_name with self.assertRaises(ValueError): dummy_function(deprecated_name="old_value") # Test for current version > deprecation version @deprecate_kwarg("deprecated_name", version="0.0.0", raise_if_greater_or_equal_version=True) def dummy_function(deprecated_name=None): return deprecated_name with self.assertRaises(ValueError): dummy_function(deprecated_name="old_value") def test_additional_message(self): # Test additional message is added to the warning @deprecate_kwarg("deprecated_name", version=INFINITE_VERSION, additional_message="Additional message") def dummy_function(deprecated_name=None): return deprecated_name with warnings.catch_warnings(record=True) as raised_warnings: warnings.simplefilter("always") dummy_function(deprecated_name="old_value") self.assertTrue("Additional message" in str(raised_warnings[0].message)) @parameterized.expand(["0.0.0", __version__, INFINITE_VERSION]) def test_warning_for_both_names(self, version): # We should raise warning if both names are passed for any specified version @deprecate_kwarg("deprecated_name", new_name="new_name", version=version) def dummy_function(new_name=None, **kwargs): return new_name with self.assertWarns(FutureWarning): result = dummy_function(deprecated_name="old_value", new_name="new_value") self.assertEqual(result, "new_value") @require_torch_gpu def test_compile_safe(self): @deprecate_kwarg("deprecated_factor", new_name="new_factor", version=INFINITE_VERSION) def dummy_function(new_factor=None, **kwargs): return new_factor * torch.ones(1, device="cuda") compiled_function = torch.compile(dummy_function, fullgraph=True) # Check that we can correctly call the compiled function with the old name, without raising errors out = compiled_function(deprecated_factor=2) self.assertEqual(out.item(), 2) # Check that 
we can correctly call the compiled function with the new name, without raising errors out = compiled_function(new_factor=2) self.assertEqual(out.item(), 2) # Check that we can correctly call the compiled function with both names, without raising errors out = compiled_function(new_factor=2, deprecated_factor=10) self.assertEqual(out.item(), 2)
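# Illustrative usage sketch of the decorator exercised above. It assumes the same
# `transformers.utils.deprecation.deprecate_kwarg` import that this test file uses; the
# function name `resize` and the keyword names `max_size`/`size` are made up for the example.
import warnings

from transformers.utils.deprecation import deprecate_kwarg


@deprecate_kwarg("max_size", new_name="size", version="9999.0.0")
def resize(size=None):
    # Callers still passing the old `max_size` keyword are rerouted to `size` and receive a
    # FutureWarning until the removal version is reached.
    return size


with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    assert resize(max_size=224) == 224
    assert any(issubclass(warning.category, FutureWarning) for warning in caught)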
transformers/tests/utils/test_deprecation.py/0
{ "file_path": "transformers/tests/utils/test_deprecation.py", "repo_id": "transformers", "token_count": 3428 }
# coding=utf-8 # Copyright 2019 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import copy import os import tempfile from importlib import import_module from math import isnan from transformers import is_tf_available from transformers.models.auto import get_values from transformers.testing_utils import _tf_gpu_memory_limit, require_tf, slow from ..test_modeling_tf_common import ids_tensor if is_tf_available(): import numpy as np import tensorflow as tf from transformers import ( TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TFSharedEmbeddings, ) from transformers.modeling_tf_utils import keras if _tf_gpu_memory_limit is not None: gpus = tf.config.list_physical_devices("GPU") for gpu in gpus: # Restrict TensorFlow to only allocate x GB of memory on the GPUs try: tf.config.set_logical_device_configuration( gpu, [tf.config.LogicalDeviceConfiguration(memory_limit=_tf_gpu_memory_limit)] ) logical_gpus = tf.config.list_logical_devices("GPU") print("Logical GPUs", logical_gpus) except RuntimeError as e: # Virtual devices must be set before GPUs have been initialized print(e) @require_tf class TFCoreModelTesterMixin: model_tester = None all_model_classes = () all_generative_model_classes = () test_mismatched_shapes = True test_resize_embeddings = True test_head_masking = True is_encoder_decoder = False def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> dict: inputs_dict = copy.deepcopy(inputs_dict) if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING): inputs_dict = { k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1)) if isinstance(v, tf.Tensor) and v.ndim > 0 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING): inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32) elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING): inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) elif model_class in [ *get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING), *get_values(TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING), ]: inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) elif model_class in get_values(TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING): inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) elif model_class in [ *get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING), *get_values(TF_MODEL_FOR_CAUSAL_LM_MAPPING), 
*get_values(TF_MODEL_FOR_MASKED_LM_MAPPING), *get_values(TF_MODEL_FOR_PRETRAINING_MAPPING), *get_values(TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING), ]: inputs_dict["labels"] = tf.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=tf.int32 ) return inputs_dict @slow def test_graph_mode(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:2]: inputs = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) @tf.function def run_in_graph_mode(): return model(inputs) outputs = run_in_graph_mode() self.assertIsNotNone(outputs) @slow def test_xla_mode(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:2]: inputs = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) @tf.function(experimental_compile=True) def run_in_graph_mode(): return model(inputs) outputs = run_in_graph_mode() self.assertIsNotNone(outputs) @slow def test_xla_fit(self): # This is a copy of the test_keras_fit method, but we use XLA compilation instead of eager config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:2]: model = model_class(config) if getattr(model, "hf_compute_loss", None): # Test that model correctly compute the loss with kwargs prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) # Is there a better way to remove these decoder inputs? prepared_for_class = { key: val for key, val in prepared_for_class.items() if key not in ("head_mask", "decoder_head_mask", "cross_attn_head_mask", "decoder_input_ids") } possible_label_cols = { "labels", "label", "label_ids", "start_positions", "start_position", "end_positions", "end_position", "next_sentence_label", } label_names = possible_label_cols.intersection(set(prepared_for_class)) self.assertGreater(len(label_names), 0, msg="No matching label names found!") labels = {key: val for key, val in prepared_for_class.items() if key in label_names} inputs_minus_labels = {key: val for key, val in prepared_for_class.items() if key not in label_names} self.assertGreater(len(inputs_minus_labels), 0) # Make sure it works with XLA! model.compile(optimizer=keras.optimizers.SGD(0.0), jit_compile=True) # Make sure the model fits without crashing regardless of where we pass the labels history = model.fit( prepared_for_class, validation_data=prepared_for_class, steps_per_epoch=1, validation_steps=1, shuffle=False, verbose=0, ) loss = history.history["loss"][0] self.assertTrue(not isnan(loss)) val_loss = history.history["val_loss"][0] self.assertTrue(not isnan(val_loss)) # Now test it with separate labels, to make sure that path works in XLA too. 
model = model_class(config) model.compile(optimizer=keras.optimizers.SGD(0.0), jit_compile=True) history = model.fit( inputs_minus_labels, labels, validation_data=(inputs_minus_labels, labels), steps_per_epoch=1, validation_steps=1, shuffle=False, verbose=0, ) loss = history.history["loss"][0] self.assertTrue(not isnan(loss)) val_loss = history.history["val_loss"][0] self.assertTrue(not isnan(val_loss)) @slow def test_saved_model_creation_extended(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True if hasattr(config, "use_cache"): config.use_cache = True encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) for model_class in self.all_model_classes[:2]: class_inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) model.build_in_name_scope() num_out = len(model(class_inputs_dict)) for key in list(class_inputs_dict.keys()): # Remove keys not in the serving signature, as the SavedModel will not be compiled to deal with them if key not in model.input_signature: del class_inputs_dict[key] # Check it's a tensor, in case the inputs dict has some bools in it too elif isinstance(class_inputs_dict[key], tf.Tensor) and class_inputs_dict[key].dtype.is_integer: class_inputs_dict[key] = tf.cast(class_inputs_dict[key], tf.int32) if set(class_inputs_dict.keys()) != set(model.input_signature.keys()): continue # Some models have inputs that the preparation functions don't create, we skip those with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, saved_model=True) saved_model_dir = os.path.join(tmpdirname, "saved_model", "1") model = keras.models.load_model(saved_model_dir) outputs = model(class_inputs_dict) if self.is_encoder_decoder: output_hidden_states = outputs["encoder_hidden_states"] output_attentions = outputs["encoder_attentions"] else: output_hidden_states = outputs["hidden_states"] output_attentions = outputs["attentions"] self.assertEqual(len(outputs), num_out) expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(output_hidden_states), expected_num_layers) self.assertListEqual( list(output_hidden_states[0].shape[-2:]), [self.model_tester.seq_length, self.model_tester.hidden_size], ) self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(output_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) @slow def test_mixed_precision(self): keras.mixed_precision.set_global_policy("mixed_float16") # try/finally block to ensure subsequent tests run in float32 try: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:2]: class_inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) outputs = model(class_inputs_dict) self.assertIsNotNone(outputs) finally: keras.mixed_precision.set_global_policy("float32") @slow def test_train_pipeline_custom_model(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # head_mask and decoder_head_mask has different shapes than other input args if "head_mask" in inputs_dict: del inputs_dict["head_mask"] if "decoder_head_mask" in inputs_dict: del 
inputs_dict["decoder_head_mask"] if "cross_attn_head_mask" in inputs_dict: del inputs_dict["cross_attn_head_mask"] tf_main_layer_classes = { module_member for model_class in self.all_model_classes for module in (import_module(model_class.__module__),) for module_member_name in dir(module) if module_member_name.endswith("MainLayer") for module_member in (getattr(module, module_member_name),) if isinstance(module_member, type) and keras.layers.Layer in module_member.__bases__ and getattr(module_member, "_keras_serializable", False) } for main_layer_class in tf_main_layer_classes: # T5MainLayer needs an embed_tokens parameter when called without the inputs_embeds parameter if "T5" in main_layer_class.__name__: # Take the same values than in TFT5ModelTester for this shared layer shared = TFSharedEmbeddings(self.model_tester.vocab_size, self.model_tester.hidden_size, name="shared") config.use_cache = False main_layer = main_layer_class(config, embed_tokens=shared) else: main_layer = main_layer_class(config) symbolic_inputs = { name: keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items() } if hasattr(self.model_tester, "num_labels"): num_labels = self.model_tester.num_labels else: num_labels = 2 X = tf.data.Dataset.from_tensor_slices( (inputs_dict, np.ones((self.model_tester.batch_size, self.model_tester.seq_length, num_labels, 1))) ).batch(1) hidden_states = main_layer(symbolic_inputs)[0] outputs = keras.layers.Dense(num_labels, activation="softmax", name="outputs")(hidden_states) model = keras.models.Model(inputs=symbolic_inputs, outputs=[outputs]) model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["binary_accuracy"]) model.fit(X, epochs=1) with tempfile.TemporaryDirectory() as tmpdirname: filepath = os.path.join(tmpdirname, "keras_model.h5") model.save(filepath) if "T5" in main_layer_class.__name__: model = keras.models.load_model( filepath, custom_objects={ main_layer_class.__name__: main_layer_class, "TFSharedEmbeddings": TFSharedEmbeddings, }, ) else: model = keras.models.load_model( filepath, custom_objects={main_layer_class.__name__: main_layer_class} ) assert isinstance(model, keras.Model) model(inputs_dict) @slow def test_graph_mode_with_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:2]: model = model_class(config) inputs = copy.deepcopy(inputs_dict) if not self.is_encoder_decoder: input_ids = inputs["input_ids"] del inputs["input_ids"] else: encoder_input_ids = inputs["input_ids"] decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids) del inputs["input_ids"] inputs.pop("decoder_input_ids", None) if not self.is_encoder_decoder: inputs["inputs_embeds"] = model.get_input_embeddings()(input_ids) else: inputs["inputs_embeds"] = model.get_input_embeddings()(encoder_input_ids) inputs["decoder_inputs_embeds"] = model.get_input_embeddings()(decoder_input_ids) inputs = self._prepare_for_class(inputs, model_class) @tf.function def run_in_graph_mode(): return model(inputs) outputs = run_in_graph_mode() self.assertIsNotNone(outputs) def _generate_random_bad_tokens(self, num_bad_tokens, model): # special tokens cannot be bad tokens special_tokens = [] if model.config.bos_token_id is not None: special_tokens.append(model.config.bos_token_id) if model.config.pad_token_id is not None: special_tokens.append(model.config.pad_token_id) if model.config.eos_token_id is not None: special_tokens.append(model.config.eos_token_id) # create random 
bad tokens that are not special tokens bad_tokens = [] while len(bad_tokens) < num_bad_tokens: token = tf.squeeze(ids_tensor((1, 1), self.model_tester.vocab_size), 0).numpy()[0] if token not in special_tokens: bad_tokens.append(token) return bad_tokens def _check_generated_ids(self, output_ids): for token_id in output_ids[0].numpy().tolist(): self.assertGreaterEqual(token_id, 0) self.assertLess(token_id, self.model_tester.vocab_size) def _check_match_tokens(self, generated_ids, bad_words_ids): # for all bad word tokens for bad_word_ids in bad_words_ids: # for all slices in batch for generated_ids_slice in generated_ids: # for all word idx for i in range(len(bad_word_ids), len(generated_ids_slice)): # if tokens match if generated_ids_slice[i - len(bad_word_ids) : i] == bad_word_ids: return True return False
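# Illustrative sketch of the XLA pattern used by `test_xla_mode` above: any callable can be
# traced and compiled with `tf.function`, and the compiled output should match the eager
# result. The test uses `experimental_compile=True`; recent TensorFlow releases expose the
# same switch as `jit_compile=True`. The toy computation below is not tied to any
# transformers model.
import tensorflow as tf


def toy_forward(x, w):
    # Stand-in for a model forward pass: a single matmul followed by a ReLU.
    return tf.nn.relu(tf.linalg.matmul(x, w))


compiled_forward = tf.function(toy_forward, jit_compile=True)

x = tf.random.normal((2, 8))
w = tf.random.normal((8, 4))
tf.debugging.assert_near(toy_forward(x, w), compiled_forward(x, w), atol=1e-5)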
transformers/tests/utils/test_modeling_tf_core.py/0
{ "file_path": "transformers/tests/utils/test_modeling_tf_core.py", "repo_id": "transformers", "token_count": 9190 }
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Utility that checks that all docstrings of public objects have an argument section matching their signature. Use from the root of the repo with: ```bash python utils/check_docstrings.py ``` for a check that will error in case of inconsistencies (used by `make repo-consistency`). To auto-fix issues run: ```bash python utils/check_docstrings.py --fix_and_overwrite ``` which is used by `make fix-copies` (note that this fills what it can, you might have to manually fill information like argument descriptions). """ import argparse import ast import enum import inspect import operator as op import re from pathlib import Path from typing import Any, Optional, Tuple, Union from check_repo import ignore_undocumented from git import Repo from transformers.utils import direct_transformers_import PATH_TO_REPO = Path(__file__).parent.parent.resolve() PATH_TO_TRANSFORMERS = Path("src").resolve() / "transformers" # This is to make sure the transformers module imported is the one in the repo. transformers = direct_transformers_import(PATH_TO_TRANSFORMERS) OPTIONAL_KEYWORD = "*optional*" # Re pattern that catches args blocks in docstrings (with all variation around the name supported). _re_args = re.compile(r"^\s*(Args?|Arguments?|Attributes?|Params?|Parameters?):\s*$") # Re pattern that parses the start of an arg block: catches <name> (<description>) in those lines. _re_parse_arg = re.compile(r"^(\s*)(\S+)\s+\((.+)\)(?:\:|$)") # Re pattern that parses the end of a description of an arg (catches the default in *optional*, defaults to xxx). _re_parse_description = re.compile(r"\*optional\*, defaults to (.*)$") # This is a temporary list of objects to ignore while we progressively fix them. Do not add anything here, fix the # docstrings instead. If formatting should be ignored for the docstring, you can put a comment # no-format on the # line before the docstring. 
OBJECTS_TO_IGNORE = [ # Deprecated "InputExample", "InputFeatures", # Signature is *args/**kwargs "TFSequenceSummary", "TFBertTokenizer", "TFGPT2Tokenizer", # Going through an argument deprecation cycle, remove after v4.46 "HybridCache", "MambaCache", "SlidingWindowCache", "StaticCache", # Missing arguments in the docstring "ASTFeatureExtractor", "AlbertModel", "AlbertTokenizerFast", "AlignTextModel", "AlignVisionConfig", "AudioClassificationPipeline", "AutoformerConfig", "AutomaticSpeechRecognitionPipeline", "BarkCoarseConfig", "BarkConfig", "BarkFineConfig", "BarkSemanticConfig", "BartConfig", "BartTokenizerFast", "BarthezTokenizerFast", "BeitModel", "BertConfig", "BertJapaneseTokenizer", "BertModel", "BertTokenizerFast", "BigBirdConfig", "BigBirdForQuestionAnswering", "BigBirdModel", "BigBirdPegasusConfig", "BigBirdTokenizerFast", "BitImageProcessor", "BlenderbotConfig", "BlenderbotSmallConfig", "BlenderbotSmallTokenizerFast", "BlenderbotTokenizerFast", "Blip2QFormerConfig", "Blip2VisionConfig", "BlipTextConfig", "BlipVisionConfig", "BloomConfig", "BloomTokenizerFast", "BridgeTowerTextConfig", "BridgeTowerVisionConfig", "BrosModel", "CamembertConfig", "CamembertModel", "CamembertTokenizerFast", "CanineModel", "CanineTokenizer", "ChineseCLIPTextModel", "ClapTextConfig", "ConditionalDetrConfig", "ConditionalDetrImageProcessor", "ConvBertConfig", "ConvBertTokenizerFast", "ConvNextConfig", "ConvNextV2Config", "CpmAntTokenizer", "CvtConfig", "CvtModel", "DeiTImageProcessor", "DPRReaderTokenizer", "DPRReaderTokenizerFast", "DPTModel", "Data2VecAudioConfig", "Data2VecTextConfig", "Data2VecTextModel", "Data2VecVisionModel", "DataCollatorForLanguageModeling", "DebertaConfig", "DebertaV2Config", "DebertaV2Tokenizer", "DebertaV2TokenizerFast", "DecisionTransformerConfig", "DeformableDetrConfig", "DeformableDetrImageProcessor", "DeiTModel", "DepthEstimationPipeline", "DetaConfig", "DetaImageProcessor", "DetrConfig", "DetrImageProcessor", "DinatModel", "DistilBertConfig", "DistilBertTokenizerFast", "DocumentQuestionAnsweringPipeline", "DonutSwinModel", "EarlyStoppingCallback", "EfficientFormerConfig", "EfficientFormerImageProcessor", "EfficientNetConfig", "ElectraConfig", "ElectraTokenizerFast", "EncoderDecoderModel", "ErnieMModel", "ErnieModel", "ErnieMTokenizer", "EsmConfig", "EsmModel", "FlaxAlbertForMaskedLM", "FlaxAlbertForMultipleChoice", "FlaxAlbertForPreTraining", "FlaxAlbertForQuestionAnswering", "FlaxAlbertForSequenceClassification", "FlaxAlbertForTokenClassification", "FlaxAlbertModel", "FlaxBartForCausalLM", "FlaxBartForConditionalGeneration", "FlaxBartForQuestionAnswering", "FlaxBartForSequenceClassification", "FlaxBartModel", "FlaxBeitForImageClassification", "FlaxBeitForMaskedImageModeling", "FlaxBeitModel", "FlaxBertForCausalLM", "FlaxBertForMaskedLM", "FlaxBertForMultipleChoice", "FlaxBertForNextSentencePrediction", "FlaxBertForPreTraining", "FlaxBertForQuestionAnswering", "FlaxBertForSequenceClassification", "FlaxBertForTokenClassification", "FlaxBertModel", "FlaxBigBirdForCausalLM", "FlaxBigBirdForMaskedLM", "FlaxBigBirdForMultipleChoice", "FlaxBigBirdForPreTraining", "FlaxBigBirdForQuestionAnswering", "FlaxBigBirdForSequenceClassification", "FlaxBigBirdForTokenClassification", "FlaxBigBirdModel", "FlaxBlenderbotForConditionalGeneration", "FlaxBlenderbotModel", "FlaxBlenderbotSmallForConditionalGeneration", "FlaxBlenderbotSmallModel", "FlaxBloomForCausalLM", "FlaxBloomModel", "FlaxCLIPModel", "FlaxDinov2ForImageClassification", "FlaxDinov2Model", "FlaxDistilBertForMaskedLM", 
"FlaxDistilBertForMultipleChoice", "FlaxDistilBertForQuestionAnswering", "FlaxDistilBertForSequenceClassification", "FlaxDistilBertForTokenClassification", "FlaxDistilBertModel", "FlaxElectraForCausalLM", "FlaxElectraForMaskedLM", "FlaxElectraForMultipleChoice", "FlaxElectraForPreTraining", "FlaxElectraForQuestionAnswering", "FlaxElectraForSequenceClassification", "FlaxElectraForTokenClassification", "FlaxElectraModel", "FlaxEncoderDecoderModel", "FlaxGPT2LMHeadModel", "FlaxGPT2Model", "FlaxGPTJForCausalLM", "FlaxGPTJModel", "FlaxGPTNeoForCausalLM", "FlaxGPTNeoModel", "FlaxLlamaForCausalLM", "FlaxLlamaModel", "FlaxGemmaForCausalLM", "FlaxGemmaModel", "FlaxMBartForConditionalGeneration", "FlaxMBartForQuestionAnswering", "FlaxMBartForSequenceClassification", "FlaxMBartModel", "FlaxMarianMTModel", "FlaxMarianModel", "FlaxMistralForCausalLM", "FlaxMistralModel", "FlaxOPTForCausalLM", "FlaxPegasusForConditionalGeneration", "FlaxPegasusModel", "FlaxRegNetForImageClassification", "FlaxRegNetModel", "FlaxResNetForImageClassification", "FlaxResNetModel", "FlaxRoFormerForMaskedLM", "FlaxRoFormerForMultipleChoice", "FlaxRoFormerForQuestionAnswering", "FlaxRoFormerForSequenceClassification", "FlaxRoFormerForTokenClassification", "FlaxRoFormerModel", "FlaxRobertaForCausalLM", "FlaxRobertaForMaskedLM", "FlaxRobertaForMultipleChoice", "FlaxRobertaForQuestionAnswering", "FlaxRobertaForSequenceClassification", "FlaxRobertaForTokenClassification", "FlaxRobertaModel", "FlaxRobertaPreLayerNormForCausalLM", "FlaxRobertaPreLayerNormForMaskedLM", "FlaxRobertaPreLayerNormForMultipleChoice", "FlaxRobertaPreLayerNormForQuestionAnswering", "FlaxRobertaPreLayerNormForSequenceClassification", "FlaxRobertaPreLayerNormForTokenClassification", "FlaxRobertaPreLayerNormModel", "FlaxSpeechEncoderDecoderModel", "FlaxViTForImageClassification", "FlaxViTModel", "FlaxVisionEncoderDecoderModel", "FlaxVisionTextDualEncoderModel", "FlaxWav2Vec2ForCTC", "FlaxWav2Vec2ForPreTraining", "FlaxWav2Vec2Model", "FlaxWhisperForAudioClassification", "FlaxWhisperForConditionalGeneration", "FlaxWhisperModel", "FlaxWhisperTimeStampLogitsProcessor", "FlaxXGLMForCausalLM", "FlaxXGLMModel", "FlaxXLMRobertaForCausalLM", "FlaxXLMRobertaForMaskedLM", "FlaxXLMRobertaForMultipleChoice", "FlaxXLMRobertaForQuestionAnswering", "FlaxXLMRobertaForSequenceClassification", "FlaxXLMRobertaForTokenClassification", "FlaxXLMRobertaModel", "FNetConfig", "FNetModel", "FNetTokenizerFast", "FSMTConfig", "FeatureExtractionPipeline", "FillMaskPipeline", "FlaubertConfig", "FlavaConfig", "FlavaForPreTraining", "FlavaImageModel", "FlavaImageProcessor", "FlavaMultimodalModel", "FlavaTextConfig", "FlavaTextModel", "FocalNetModel", "FunnelTokenizerFast", "GPTBigCodeConfig", "GPTJConfig", "GPTNeoXConfig", "GPTNeoXJapaneseConfig", "GPTNeoXTokenizerFast", "GPTSanJapaneseConfig", "GitConfig", "GitVisionConfig", "GraphormerConfig", "GroupViTTextConfig", "GroupViTVisionConfig", "HerbertTokenizerFast", "HubertConfig", "HubertForCTC", "IBertConfig", "IBertModel", "IdeficsConfig", "IdeficsProcessor", "IJepaModel", "ImageClassificationPipeline", "ImageFeatureExtractionPipeline", "ImageGPTConfig", "ImageSegmentationPipeline", "ImageTextToTextPipeline", "ImageToImagePipeline", "ImageToTextPipeline", "InformerConfig", "JukeboxPriorConfig", "JukeboxTokenizer", "LEDConfig", "LEDTokenizerFast", "LayoutLMForQuestionAnswering", "LayoutLMTokenizerFast", "LayoutLMv2Config", "LayoutLMv2ForQuestionAnswering", "LayoutLMv2TokenizerFast", "LayoutLMv3Config", "LayoutLMv3ImageProcessor", 
"LayoutLMv3TokenizerFast", "LayoutXLMTokenizerFast", "LevitConfig", "LiltConfig", "LiltModel", "LongT5Config", "LongformerConfig", "LongformerModel", "LongformerTokenizerFast", "LukeModel", "LukeTokenizer", "LxmertTokenizerFast", "M2M100Config", "M2M100Tokenizer", "MarkupLMProcessor", "MaskGenerationPipeline", "MBart50TokenizerFast", "MBartConfig", "MCTCTFeatureExtractor", "MPNetConfig", "MPNetModel", "MPNetTokenizerFast", "MT5Config", "MT5TokenizerFast", "MarianConfig", "MarianTokenizer", "MarkupLMConfig", "MarkupLMModel", "MarkupLMTokenizer", "MarkupLMTokenizerFast", "Mask2FormerConfig", "MaskFormerConfig", "MaxTimeCriteria", "MegaConfig", "MegaModel", "MegatronBertConfig", "MegatronBertForPreTraining", "MegatronBertModel", "MobileBertConfig", "MobileBertModel", "MobileBertTokenizerFast", "MobileNetV1ImageProcessor", "MobileNetV1Model", "MobileNetV2ImageProcessor", "MobileNetV2Model", "MobileViTModel", "MobileViTV2Model", "MLukeTokenizer", "MraConfig", "MusicgenDecoderConfig", "MusicgenForConditionalGeneration", "MusicgenMelodyForConditionalGeneration", "MvpConfig", "MvpTokenizerFast", "MT5Tokenizer", "NatModel", "NerPipeline", "NezhaConfig", "NezhaModel", "NllbMoeConfig", "NllbTokenizer", "NllbTokenizerFast", "NystromformerConfig", "OPTConfig", "ObjectDetectionPipeline", "OneFormerProcessor", "OpenAIGPTTokenizerFast", "OpenLlamaConfig", "PLBartConfig", "PegasusConfig", "PegasusTokenizer", "PegasusTokenizerFast", "PegasusXConfig", "PerceiverImageProcessor", "PerceiverModel", "PerceiverTokenizer", "PersimmonConfig", "Pipeline", "Pix2StructConfig", "Pix2StructTextConfig", "PLBartTokenizer", "Pop2PianoConfig", "PreTrainedTokenizer", "PreTrainedTokenizerBase", "PreTrainedTokenizerFast", "PrefixConstrainedLogitsProcessor", "ProphetNetConfig", "QDQBertConfig", "QDQBertModel", "QuestionAnsweringPipeline", "RagConfig", "RagModel", "RagRetriever", "RagSequenceForGeneration", "RagTokenForGeneration", "RealmConfig", "RealmForOpenQA", "RealmScorer", "RealmTokenizerFast", "ReformerConfig", "ReformerTokenizerFast", "RegNetConfig", "RemBertConfig", "RemBertModel", "RemBertTokenizer", "RemBertTokenizerFast", "RetriBertConfig", "RetriBertTokenizerFast", "RoCBertConfig", "RoCBertModel", "RoCBertTokenizer", "RoFormerConfig", "RobertaConfig", "RobertaModel", "RobertaPreLayerNormConfig", "RobertaPreLayerNormModel", "RobertaTokenizerFast", "SEWConfig", "SEWDConfig", "SEWDForCTC", "SEWForCTC", "SamConfig", "SamPromptEncoderConfig", "SeamlessM4TConfig", # use of unconventional markdown "SeamlessM4Tv2Config", # use of unconventional markdown "Seq2SeqTrainingArguments", "SpecialTokensMixin", "Speech2Text2Config", "Speech2Text2Tokenizer", "Speech2TextTokenizer", "SpeechEncoderDecoderModel", "SpeechT5Config", "SpeechT5Model", "SplinterConfig", "SplinterTokenizerFast", "SqueezeBertTokenizerFast", "SummarizationPipeline", "Swin2SRImageProcessor", "Swinv2Model", "SwitchTransformersConfig", "T5Config", "T5Tokenizer", "T5TokenizerFast", "TableQuestionAnsweringPipeline", "TableTransformerConfig", "TapasConfig", "TapasModel", "TapasTokenizer", "Text2TextGenerationPipeline", "TextClassificationPipeline", "TextGenerationPipeline", "TFBartForConditionalGeneration", "TFBartForSequenceClassification", "TFBartModel", "TFBertModel", "TFConvNextModel", "TFData2VecVisionModel", "TFDeiTModel", "TFEncoderDecoderModel", "TFEsmModel", "TFMobileViTModel", "TFRagModel", "TFRagSequenceForGeneration", "TFRagTokenForGeneration", "TFRepetitionPenaltyLogitsProcessor", "TFSwinModel", "TFViTModel", "TFVisionEncoderDecoderModel", 
"TFVisionTextDualEncoderModel", "TFXGLMForCausalLM", "TFXGLMModel", "TimeSeriesTransformerConfig", "TokenClassificationPipeline", "TrOCRConfig", "TrainerState", "TrainingArguments", "TrajectoryTransformerConfig", "TranslationPipeline", "TvltImageProcessor", "UMT5Config", "UperNetConfig", "UperNetForSemanticSegmentation", "ViTHybridImageProcessor", "ViTHybridModel", "ViTMSNModel", "ViTModel", "VideoClassificationPipeline", "ViltConfig", "ViltForImagesAndTextClassification", "ViltModel", "VisionEncoderDecoderModel", "VisionTextDualEncoderModel", "VisualBertConfig", "VisualBertModel", "VisualQuestionAnsweringPipeline", "VitMatteForImageMatting", "VitsTokenizer", "VivitModel", "Wav2Vec2BertForCTC", "Wav2Vec2CTCTokenizer", "Wav2Vec2Config", "Wav2Vec2ConformerConfig", "Wav2Vec2ConformerForCTC", "Wav2Vec2FeatureExtractor", "Wav2Vec2PhonemeCTCTokenizer", "WavLMConfig", "WavLMForCTC", "WhisperConfig", "WhisperFeatureExtractor", "WhisperForAudioClassification", "XCLIPTextConfig", "XCLIPVisionConfig", "XGLMConfig", "XGLMModel", "XGLMTokenizerFast", "XLMConfig", "XLMProphetNetConfig", "XLMRobertaConfig", "XLMRobertaModel", "XLMRobertaTokenizerFast", "XLMRobertaXLConfig", "XLMRobertaXLModel", "XLNetConfig", "XLNetTokenizerFast", "XmodConfig", "XmodModel", "YolosImageProcessor", "YolosModel", "YosoConfig", "ZeroShotAudioClassificationPipeline", "ZeroShotClassificationPipeline", "ZeroShotImageClassificationPipeline", "ZeroShotObjectDetectionPipeline", ] # Supported math operations when interpreting the value of defaults. MATH_OPERATORS = { ast.Add: op.add, ast.Sub: op.sub, ast.Mult: op.mul, ast.Div: op.truediv, ast.Pow: op.pow, ast.BitXor: op.xor, ast.USub: op.neg, } def find_indent(line: str) -> int: """ Returns the number of spaces that start a line indent. """ search = re.search(r"^(\s*)(?:\S|$)", line) if search is None: return 0 return len(search.groups()[0]) def stringify_default(default: Any) -> str: """ Returns the string representation of a default value, as used in docstring: numbers are left as is, all other objects are in backtiks. Args: default (`Any`): The default value to process Returns: `str`: The string representation of that default. """ if isinstance(default, bool): # We need to test for bool first as a bool passes isinstance(xxx, (int, float)) return f"`{default}`" elif isinstance(default, enum.Enum): # We need to test for enum first as an enum with int values will pass isinstance(xxx, (int, float)) return f"`{str(default)}`" elif isinstance(default, int): return str(default) elif isinstance(default, float): result = str(default) return str(round(default, 2)) if len(result) > 6 else result elif isinstance(default, str): return str(default) if default.isnumeric() else f'`"{default}"`' elif isinstance(default, type): return f"`{default.__name__}`" else: return f"`{default}`" def eval_math_expression(expression: str) -> Optional[Union[float, int]]: # Mainly taken from the excellent https://stackoverflow.com/a/9558001 """ Evaluate (safely) a mathematial expression and returns its value. Args: expression (`str`): The expression to evaluate. Returns: `Optional[Union[float, int]]`: Returns `None` if the evaluation fails in any way and the value computed otherwise. 
Example: ```py >>> eval_expr('2^6') 4 >>> eval_expr('2**6') 64 >>> eval_expr('1 + 2*3**(4^5) / (6 + -7)') -5.0 ``` """ try: return eval_node(ast.parse(expression, mode="eval").body) except TypeError: return def eval_node(node): if isinstance(node, ast.Num): # <number> return node.n elif isinstance(node, ast.BinOp): # <left> <operator> <right> return MATH_OPERATORS[type(node.op)](eval_node(node.left), eval_node(node.right)) elif isinstance(node, ast.UnaryOp): # <operator> <operand> e.g., -1 return MATH_OPERATORS[type(node.op)](eval_node(node.operand)) else: raise TypeError(node) def replace_default_in_arg_description(description: str, default: Any) -> str: """ Catches the default value in the description of an argument inside a docstring and replaces it by the value passed. Args: description (`str`): The description of an argument in a docstring to process. default (`Any`): The default value that whould be in the docstring of that argument. Returns: `str`: The description updated with the new default value. """ # Lots of docstrings have `optional` or **opational** instead of *optional* so we do this fix here. description = description.replace("`optional`", OPTIONAL_KEYWORD) description = description.replace("**optional**", OPTIONAL_KEYWORD) if default is inspect._empty: # No default, make sure the description doesn't have any either idx = description.find(OPTIONAL_KEYWORD) if idx != -1: description = description[:idx].rstrip() if description.endswith(","): description = description[:-1].rstrip() elif default is None: # Default None are not written, we just set `*optional*`. If there is default that is not None specified in the # description, we do not erase it (as sometimes we set the default to `None` because the default is a mutable # object). idx = description.find(OPTIONAL_KEYWORD) if idx == -1: description = f"{description}, {OPTIONAL_KEYWORD}" elif re.search(r"defaults to `?None`?", description) is not None: len_optional = len(OPTIONAL_KEYWORD) description = description[: idx + len_optional] else: str_default = None # For numbers we may have a default that is given by a math operation (1/255 is really popular). We don't # want to replace those by their actual values. if isinstance(default, (int, float)) and re.search("defaults to `?(.*?)(?:`|$)", description) is not None: # Grab the default and evaluate it. current_default = re.search("defaults to `?(.*?)(?:`|$)", description).groups()[0] if default == eval_math_expression(current_default): try: # If it can be directly converted to the type of the default, it's a simple value str_default = str(type(default)(current_default)) except Exception: # Otherwise there is a math operator so we add a code block. str_default = f"`{current_default}`" elif isinstance(default, enum.Enum) and default.name == current_default.split(".")[-1]: # When the default is an Enum (this is often the case for PIL.Image.Resampling), and the docstring # matches the enum name, keep the existing docstring rather than clobbering it with the enum value. 
str_default = f"`{current_default}`" if str_default is None: str_default = stringify_default(default) # Make sure default match if OPTIONAL_KEYWORD not in description: description = f"{description}, {OPTIONAL_KEYWORD}, defaults to {str_default}" elif _re_parse_description.search(description) is None: idx = description.find(OPTIONAL_KEYWORD) len_optional = len(OPTIONAL_KEYWORD) description = f"{description[:idx + len_optional]}, defaults to {str_default}" else: description = _re_parse_description.sub(rf"*optional*, defaults to {str_default}", description) return description def get_default_description(arg: inspect.Parameter) -> str: """ Builds a default description for a parameter that was not documented. Args: arg (`inspect.Parameter`): The argument in the signature to generate a description for. Returns: `str`: The description. """ if arg.annotation is inspect._empty: arg_type = "<fill_type>" elif hasattr(arg.annotation, "__name__"): arg_type = arg.annotation.__name__ else: arg_type = str(arg.annotation) if arg.default is inspect._empty: return f"`{arg_type}`" elif arg.default is None: return f"`{arg_type}`, {OPTIONAL_KEYWORD}" else: str_default = stringify_default(arg.default) return f"`{arg_type}`, {OPTIONAL_KEYWORD}, defaults to {str_default}" def find_source_file(obj: Any) -> Path: """ Finds the source file of an object. Args: obj (`Any`): The object whose source file we are looking for. Returns: `Path`: The source file. """ module = obj.__module__ obj_file = PATH_TO_TRANSFORMERS for part in module.split(".")[1:]: obj_file = obj_file / part return obj_file.with_suffix(".py") def match_docstring_with_signature(obj: Any) -> Optional[Tuple[str, str]]: """ Matches the docstring of an object with its signature. Args: obj (`Any`): The object to process. Returns: `Optional[Tuple[str, str]]`: Returns `None` if there is no docstring or no parameters documented in the docstring, otherwise returns a tuple of two strings: the current documentation of the arguments in the docstring and the one matched with the signature. """ if len(getattr(obj, "__doc__", "")) == 0: # Nothing to do, there is no docstring. return # Read the docstring in the source code to see if there is a special command to ignore this object. try: source, _ = inspect.getsourcelines(obj) except OSError: source = [] idx = 0 while idx < len(source) and '"""' not in source[idx]: idx += 1 ignore_order = False if idx < len(source): line_before_docstring = source[idx - 1] if re.search(r"^\s*#\s*no-format\s*$", line_before_docstring): # This object is ignored return elif re.search(r"^\s*#\s*ignore-order\s*$", line_before_docstring): ignore_order = True # Read the signature signature = inspect.signature(obj).parameters obj_doc_lines = obj.__doc__.split("\n") # Get to the line where we start documenting arguments idx = 0 while idx < len(obj_doc_lines) and _re_args.search(obj_doc_lines[idx]) is None: idx += 1 if idx == len(obj_doc_lines): # Nothing to do, no parameters are documented. return if "kwargs" in signature and signature["kwargs"].annotation != inspect._empty: # Inspecting signature with typed kwargs is not supported yet. return indent = find_indent(obj_doc_lines[idx]) arguments = {} current_arg = None idx += 1 start_idx = idx # Keep going until the arg section is finished (nonempty line at the same indent level) or the end of the docstring. 
while idx < len(obj_doc_lines) and ( len(obj_doc_lines[idx].strip()) == 0 or find_indent(obj_doc_lines[idx]) > indent ): if find_indent(obj_doc_lines[idx]) == indent + 4: # New argument -> let's generate the proper doc for it re_search_arg = _re_parse_arg.search(obj_doc_lines[idx]) if re_search_arg is not None: _, name, description = re_search_arg.groups() current_arg = name if name in signature: default = signature[name].default if signature[name].kind is inspect._ParameterKind.VAR_KEYWORD: default = None new_description = replace_default_in_arg_description(description, default) else: new_description = description init_doc = _re_parse_arg.sub(rf"\1\2 ({new_description}):", obj_doc_lines[idx]) arguments[current_arg] = [init_doc] elif current_arg is not None: arguments[current_arg].append(obj_doc_lines[idx]) idx += 1 # We went too far by one (perhaps more if there are a lot of new lines) idx -= 1 if current_arg: while len(obj_doc_lines[idx].strip()) == 0: arguments[current_arg] = arguments[current_arg][:-1] idx -= 1 # And we went too far by one again. idx += 1 old_doc_arg = "\n".join(obj_doc_lines[start_idx:idx]) old_arguments = list(arguments.keys()) arguments = {name: "\n".join(doc) for name, doc in arguments.items()} # Add missing arguments with a template for name in set(signature.keys()) - set(arguments.keys()): arg = signature[name] # We ignore private arguments or *args/**kwargs (unless they are documented by the user) if name.startswith("_") or arg.kind in [ inspect._ParameterKind.VAR_KEYWORD, inspect._ParameterKind.VAR_POSITIONAL, ]: arguments[name] = "" else: arg_desc = get_default_description(arg) arguments[name] = " " * (indent + 4) + f"{name} ({arg_desc}): <fill_docstring>" # Arguments are sorted by the order in the signature unless a special comment is put. if ignore_order: new_param_docs = [arguments[name] for name in old_arguments if name in signature] missing = set(signature.keys()) - set(old_arguments) new_param_docs.extend([arguments[name] for name in missing if len(arguments[name]) > 0]) else: new_param_docs = [arguments[name] for name in signature.keys() if len(arguments[name]) > 0] new_doc_arg = "\n".join(new_param_docs) return old_doc_arg, new_doc_arg def fix_docstring(obj: Any, old_doc_args: str, new_doc_args: str): """ Fixes the docstring of an object by replacing its arguments documentation by the one matched with the signature. Args: obj (`Any`): The object whose docstring we are fixing. old_doc_args (`str`): The current documentation of the parameters of `obj` in the docstring (as returned by `match_docstring_with_signature`). new_doc_args (`str`): The documentation of the parameters of `obj` matched with its signature (as returned by `match_docstring_with_signature`). 
""" # Read the docstring in the source code and make sure we have the right part of the docstring source, line_number = inspect.getsourcelines(obj) # Get to the line where we start documenting arguments idx = 0 while idx < len(source) and _re_args.search(source[idx]) is None: idx += 1 if idx == len(source): # Args are not defined in the docstring of this object return # Get to the line where we stop documenting arguments indent = find_indent(source[idx]) idx += 1 start_idx = idx while idx < len(source) and (len(source[idx].strip()) == 0 or find_indent(source[idx]) > indent): idx += 1 idx -= 1 while len(source[idx].strip()) == 0: idx -= 1 idx += 1 if "".join(source[start_idx:idx])[:-1] != old_doc_args: # Args are not fully defined in the docstring of this object return obj_file = find_source_file(obj) with open(obj_file, "r", encoding="utf-8") as f: content = f.read() # Replace content lines = content.split("\n") lines = lines[: line_number + start_idx - 1] + [new_doc_args] + lines[line_number + idx - 1 :] print(f"Fixing the docstring of {obj.__name__} in {obj_file}.") with open(obj_file, "w", encoding="utf-8") as f: f.write("\n".join(lines)) def check_docstrings(overwrite: bool = False, check_all: bool = False): """ Check docstrings of all public objects that are callables and are documented. By default, only checks the diff. Args: overwrite (`bool`, *optional*, defaults to `False`): Whether to fix inconsistencies or not. check_all (`bool`, *optional*, defaults to `False`): Whether to check all files. """ module_diff_files = None if not check_all: module_diff_files = set() repo = Repo(PATH_TO_REPO) # Diff from index to unstaged files for modified_file_diff in repo.index.diff(None): if modified_file_diff.a_path.startswith("src/transformers"): module_diff_files.add(modified_file_diff.a_path) # Diff from index to `main` for modified_file_diff in repo.index.diff(repo.refs.main.commit): if modified_file_diff.a_path.startswith("src/transformers"): module_diff_files.add(modified_file_diff.a_path) # quick escape route: if there are no module files in the diff, skip this check if len(module_diff_files) == 0: return print(" Checking docstrings in the following files:" + "\n - " + "\n - ".join(module_diff_files)) failures = [] hard_failures = [] to_clean = [] for name in dir(transformers): # Skip objects that are private or not documented. if name.startswith("_") or ignore_undocumented(name) or name in OBJECTS_TO_IGNORE: continue obj = getattr(transformers, name) if not callable(obj) or not isinstance(obj, type) or getattr(obj, "__doc__", None) is None: continue # If we are checking against the diff, we skip objects that are not part of the diff. 
if module_diff_files is not None: object_file = find_source_file(getattr(transformers, name)) object_file_relative_path = "src/" + str(object_file).split("/src/")[1] if object_file_relative_path not in module_diff_files: continue # Check docstring try: result = match_docstring_with_signature(obj) if result is not None: old_doc, new_doc = result else: old_doc, new_doc = None, None except Exception as e: print(e) hard_failures.append(name) continue if old_doc != new_doc: if overwrite: fix_docstring(obj, old_doc, new_doc) else: failures.append(name) elif not overwrite and new_doc is not None and ("<fill_type>" in new_doc or "<fill_docstring>" in new_doc): to_clean.append(name) # Deal with errors error_message = "" if len(hard_failures) > 0: error_message += ( "The argument part of the docstrings of the following objects could not be processed, check they are " "properly formatted." ) error_message += "\n" + "\n".join([f"- {name}" for name in hard_failures]) if len(failures) > 0: error_message += ( "The following objects docstrings do not match their signature. Run `make fix-copies` to fix this. " "In some cases, this error may be raised incorrectly by the docstring checker. If you think this is the " "case, you can manually check the docstrings and then add the object name to `OBJECTS_TO_IGNORE` in " "`utils/check_docstrings.py`." ) error_message += "\n" + "\n".join([f"- {name}" for name in failures]) if len(to_clean) > 0: error_message += ( "The following objects docstrings contain templates you need to fix: search for `<fill_type>` or " "`<fill_docstring>`." ) error_message += "\n" + "\n".join([f"- {name}" for name in to_clean]) if len(error_message) > 0: error_message = "There was at least one problem when checking docstrings of public objects.\n" + error_message raise ValueError(error_message) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.") parser.add_argument( "--check_all", action="store_true", help="Whether to check all files. By default, only checks the diff" ) args = parser.parse_args() check_docstrings(overwrite=args.fix_and_overwrite, check_all=args.check_all)
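# Illustrative invocations (added as a sketch; the flags come from the argparse definition above and the
# working directory is assumed to be the transformers repository root):
#
#   python utils/check_docstrings.py                      # report mismatches for objects touched by the diff
#   python utils/check_docstrings.py --fix_and_overwrite  # rewrite the offending docstrings in place
#   python utils/check_docstrings.py --check_all          # check every public object, not only the diff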
transformers/utils/check_docstrings.py/0
{ "file_path": "transformers/utils/check_docstrings.py", "repo_id": "transformers", "token_count": 14531 }
import argparse import json import os import time import zipfile from get_ci_error_statistics import download_artifact, get_artifacts_links from transformers import logging logger = logging.get_logger(__name__) def extract_warnings_from_single_artifact(artifact_path, targets): """Extract warnings from a downloaded artifact (in .zip format)""" selected_warnings = set() buffer = [] def parse_line(fp): for line in fp: if isinstance(line, bytes): line = line.decode("UTF-8") if "warnings summary (final)" in line: continue # This means we are outside the body of a warning elif not line.startswith(" "): # process a single warning and move it to `selected_warnings`. if len(buffer) > 0: warning = "\n".join(buffer) # Only keep the warnings specified in `targets` if any(f": {x}: " in warning for x in targets): selected_warnings.add(warning) buffer.clear() continue else: line = line.strip() buffer.append(line) if from_gh: for filename in os.listdir(artifact_path): file_path = os.path.join(artifact_path, filename) if not os.path.isdir(file_path): # read the file if filename != "warnings.txt": continue with open(file_path) as fp: parse_line(fp) else: try: with zipfile.ZipFile(artifact_path) as z: for filename in z.namelist(): if not os.path.isdir(filename): # read the file if filename != "warnings.txt": continue with z.open(filename) as fp: parse_line(fp) except Exception: logger.warning( f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped." ) return selected_warnings def extract_warnings(artifact_dir, targets): """Extract warnings from all artifact files""" selected_warnings = set() paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)] for p in paths: selected_warnings.update(extract_warnings_from_single_artifact(p, targets)) return selected_warnings if __name__ == "__main__": def list_str(values): return values.split(",") parser = argparse.ArgumentParser() # Required parameters parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.") parser.add_argument( "--output_dir", type=str, required=True, help="Where to store the downloaded artifacts and other result files.", ) parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.") # optional parameters parser.add_argument( "--targets", default="DeprecationWarning,UserWarning,FutureWarning", type=list_str, help="Comma-separated list of target warning(s) which we want to extract.", ) parser.add_argument( "--from_gh", action="store_true", help="If running from a GitHub action workflow and collecting warnings from its artifacts.", ) args = parser.parse_args() from_gh = args.from_gh if from_gh: # The artifacts have to be downloaded using `actions/download-artifact@v4` pass else: os.makedirs(args.output_dir, exist_ok=True) # get download links artifacts = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) # download artifacts for idx, (name, url) in enumerate(artifacts.items()): print(name) print(url) print("=" * 80) download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) # extract warnings from artifacts selected_warnings = extract_warnings(args.output_dir, args.targets) selected_warnings = sorted(selected_warnings) with open(os.path.join(args.output_dir, "selected_warnings.json"), 
"w", encoding="UTF-8") as fp: json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
transformers/utils/extract_warnings.py/0
{ "file_path": "transformers/utils/extract_warnings.py", "repo_id": "transformers", "token_count": 2110 }
#!/usr/bin/env python3 # coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import sys import transformers os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" print("Python version:", sys.version) print("transformers version:", transformers.__version__) try: import torch print("Torch version:", torch.__version__) print("Cuda available:", torch.cuda.is_available()) print("Cuda version:", torch.version.cuda) print("CuDNN version:", torch.backends.cudnn.version()) print("Number of GPUs available:", torch.cuda.device_count()) print("NCCL version:", torch.cuda.nccl.version()) except ImportError: print("Torch version:", None) try: import deepspeed print("DeepSpeed version:", deepspeed.__version__) except ImportError: print("DeepSpeed version:", None) try: import tensorflow as tf print("TensorFlow version:", tf.__version__) print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU"))) print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU"))) except ImportError: print("TensorFlow version:", None)
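# Illustrative invocation (added as a sketch; the script takes no arguments and simply prints the
# environment information gathered above):
#
#   python utils/print_env.py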
transformers/utils/print_env.py/0
{ "file_path": "transformers/utils/print_env.py", "repo_id": "transformers", "token_count": 546 }
# Best-of-N sampling: Alternative ways to get better model output without RL-based fine-tuning 

Within the extras module is the `best-of-n` sampler class, which serves as an alternative method of generating better model output. For a comparison with RL-based fine-tuning, see the comparison example in the `examples` directory.

## Usage

To get started quickly, create an instance of the class with a model, a length sampler, a tokenizer, and a callable that serves as a proxy reward pipeline, outputting reward scores for input queries:

```python
from transformers import pipeline, AutoTokenizer

from trl import AutoModelForCausalLMWithValueHead
from trl.core import LengthSampler
from trl.extras import BestOfNSampler

ref_model = AutoModelForCausalLMWithValueHead.from_pretrained(ref_model_name)
reward_pipe = pipeline("sentiment-analysis", model=reward_model, device=device)
tokenizer = AutoTokenizer.from_pretrained(ref_model_name)
tokenizer.pad_token = tokenizer.eos_token


# callable that takes a list of raw text and returns a list of corresponding reward scores
def queries_to_scores(list_of_strings):
    return [output["score"] for output in reward_pipe(list_of_strings)]


best_of_n = BestOfNSampler(model, tokenizer, queries_to_scores, length_sampler=output_length_sampler)
```

Assuming you have a list/tensor of tokenized queries, you can then generate better output by calling the `generate` method:

```python
best_of_n.generate(query_tensors, device=device, **gen_kwargs)
```

The default sample size is 4, but you can change it at initialization time like so:

```python
best_of_n = BestOfNSampler(model, tokenizer, queries_to_scores, length_sampler=output_length_sampler, sample_size=8)
```

By default the output is the top-scored generation for each query, but you can change it to the top 2 and so on by passing the `n_candidates` argument at initialization time:

```python
best_of_n = BestOfNSampler(model, tokenizer, queries_to_scores, length_sampler=output_length_sampler, n_candidates=2)
```

You can also set the generation settings (like `temperature` or `pad_token_id`) at the time of instance creation, as opposed to when calling the `generate` method. This is done by passing a `GenerationConfig` from the `transformers` library at initialization:

```python
from transformers import GenerationConfig

generation_config = GenerationConfig(min_length=-1, top_k=0.0, top_p=1.0, do_sample=True, pad_token_id=tokenizer.eos_token_id)

best_of_n = BestOfNSampler(model, tokenizer, queries_to_scores, length_sampler=output_length_sampler, generation_config=generation_config)

best_of_n.generate(query_tensors, device=device)
```

Furthermore, at initialization time you can set the seed to control the repeatability of the generation process, as well as the number of samples to generate for each query.
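A minimal sketch of this, reusing the objects created above (`sample_size` is the argument shown earlier for the number of samples; the `seed` keyword name is assumed here):

```python
best_of_n = BestOfNSampler(
    model,
    tokenizer,
    queries_to_scores,
    length_sampler=output_length_sampler,
    sample_size=8,  # number of candidate generations sampled per query
    seed=0,         # fixes the sampling seed for reproducible generations
)
```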
trl/docs/source/best_of_n.md/0
{ "file_path": "trl/docs/source/best_of_n.md", "repo_id": "trl", "token_count": 841 }
<div style="text-align: center">
<img src="https://huggingface.co/datasets/trl-lib/documentation-images/resolve/main/trl_banner_dark.png">
</div>

# TRL - Transformer Reinforcement Learning

TRL is a full-stack library providing a set of tools to train transformer language models with Reinforcement Learning, from the Supervised Fine-tuning (SFT) and Reward Modeling (RM) steps to the Proximal Policy Optimization (PPO) step. The library is integrated with 🤗 [transformers](https://github.com/huggingface/transformers).

## Learn

Learn post-training with TRL and other libraries in 🤗 [smol course](https://github.com/huggingface/smol-course).

## API documentation

- [Model Classes](models): *A brief overview of what each public model class does.*
- [`SFTTrainer`](sft_trainer): *Supervised fine-tuning of your model made easy with `SFTTrainer`.*
- [`RewardTrainer`](reward_trainer): *Easily train your reward model using `RewardTrainer`.*
- [`PPOTrainer`](ppo_trainer): *Further fine-tune the supervised fine-tuned model using the PPO algorithm.*
- [Best-of-N Sampling](best-of-n): *Use best-of-n sampling as an alternative way to sample predictions from your active model.*
- [`DPOTrainer`](dpo_trainer): *Direct Preference Optimization training using `DPOTrainer`.*
- [`TextEnvironment`](text_environments): *Text environment to train your model using tools with RL.*

## Examples

- [Sentiment Tuning](sentiment_tuning): *Fine-tune your model to generate positive movie content.*
- [Training with PEFT](lora_tuning_peft): *Memory-efficient RLHF training using adapters with PEFT.*
- [Detoxifying LLMs](detoxifying_a_lm): *Detoxify your language model through RLHF.*
- [StackLlama](using_llama_models): *End-to-end RLHF training of a Llama model on the Stack Exchange dataset.*
- [Learning with Tools](learning_tools): *Walkthrough of using `TextEnvironments`.*
- [Multi-Adapter Training](multi_adapter_rl): *Use a single base model and multiple adapters for memory-efficient end-to-end training.*

## Blog posts

<div class="mt-10">
  <div class="w-full flex flex-col space-y-4 md:space-y-0 md:grid md:grid-cols-2 md:gap-y-4 md:gap-x-5">
    <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="https://huggingface.co/blog/dpo_vlm">
      <img src="https://raw.githubusercontent.com/huggingface/blog/main/assets/dpo_vlm/thumbnail.png" alt="thumbnail" class="mt-0">
      <p class="text-gray-500 text-sm">Published on July 10, 2024</p>
      <p class="text-gray-700">Preference Optimization for Vision Language Models with TRL</p>
    </a>
    <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="https://huggingface.co/blog/putting_rl_back_in_rlhf_with_rloo">
      <img src="https://raw.githubusercontent.com/huggingface/blog/main/assets/putting_rl_back_in_rlhf_with_rloo/thumbnail.png" alt="thumbnail" class="mt-0">
      <p class="text-gray-500 text-sm">Published on June 12, 2024</p>
      <p class="text-gray-700">Putting RL back in RLHF</p>
    </a>
    <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="https://huggingface.co/blog/trl-ddpo">
      <img src="https://raw.githubusercontent.com/huggingface/blog/main/assets/166_trl_ddpo/thumbnail.png" alt="thumbnail" class="mt-0">
      <p class="text-gray-500 text-sm">Published on September 29, 2023</p>
      <p class="text-gray-700">Finetune Stable Diffusion Models with DDPO via TRL</p>
    </a>
    <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="https://huggingface.co/blog/dpo-trl">
      <img
src="https://raw.githubusercontent.com/huggingface/blog/main/assets/157_dpo_trl/dpo_thumbnail.png" alt="thumbnail" class="mt-0"> <p class="text-gray-500 text-sm">Published on August 8, 2023</p> <p class="text-gray-700">Fine-tune Llama 2 with DPO</p> </a> <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="https://huggingface.co/blog/stackllama"> <img src="https://raw.githubusercontent.com/huggingface/blog/main/assets/138_stackllama/thumbnail.png" alt="thumbnail" class="mt-0"> <p class="text-gray-500 text-sm">Published on April 5, 2023</p> <p class="text-gray-700">StackLLaMA: A hands-on guide to train LLaMA with RLHF</p> </a> <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="https://huggingface.co/blog/trl-peft"> <img src="https://raw.githubusercontent.com/huggingface/blog/main/assets/133_trl_peft/thumbnail.png" alt="thumbnail" class="mt-0"> <p class="text-gray-500 text-sm">Published on March 9, 2023</p> <p class="text-gray-700">Fine-tuning 20B LLMs with RLHF on a 24GB consumer GPU</p> </a> <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="https://huggingface.co/blog/rlhf"> <img src="https://raw.githubusercontent.com/huggingface/blog/main/assets/120_rlhf/thumbnail.png" alt="thumbnail" class="mt-0"> <p class="text-gray-500 text-sm">Published on December 9, 2022</p> <p class="text-gray-700">Illustrating Reinforcement Learning from Human Feedback</p> </a> </div> </div>
trl/docs/source/index.md/0
{ "file_path": "trl/docs/source/index.md", "repo_id": "trl", "token_count": 1901 }
# Quickstart

## How does it work?

Fine-tuning a language model via PPO consists of roughly three steps:

1. **Rollout**: The language model generates a response or continuation based on a query, which could be the start of a sentence.
2. **Evaluation**: The query and response are evaluated with a function, a model, human feedback, or some combination of them. The important thing is that this process should yield a scalar value for each query/response pair. The optimization will aim at maximizing this value.
3. **Optimization**: This is the most complex part. In the optimization step, the query/response pairs are used to calculate the log-probabilities of the tokens in the sequences. This is done with the model being trained and a reference model, which is usually the pre-trained model before fine-tuning. The KL-divergence between the two outputs is used as an additional reward signal to make sure the generated responses don't deviate too far from the reference language model. The active language model is then trained with PPO.

The full process is illustrated in the following figure:
<img src="https://huggingface.co/datasets/trl-lib/documentation-images/resolve/main/trl_overview.png"/>

## Minimal example

The following code illustrates the steps above.

```python
# 0. imports
import torch
from transformers import GPT2Tokenizer

from trl import AutoModelForCausalLMWithValueHead, PPOConfig, PPOTrainer


# 1. load a pretrained model
model = AutoModelForCausalLMWithValueHead.from_pretrained("gpt2")
ref_model = AutoModelForCausalLMWithValueHead.from_pretrained("gpt2")
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
tokenizer.pad_token = tokenizer.eos_token

# 2. initialize trainer
ppo_config = {"mini_batch_size": 1, "batch_size": 1}
config = PPOConfig(**ppo_config)
ppo_trainer = PPOTrainer(config, model, ref_model, tokenizer)

# 3. encode a query
query_txt = "This morning I went to the "
query_tensor = tokenizer.encode(query_txt, return_tensors="pt").to(model.pretrained_model.device)

# 4. generate model response
generation_kwargs = {
    "min_length": -1,
    "top_k": 0.0,
    "top_p": 1.0,
    "do_sample": True,
    "pad_token_id": tokenizer.eos_token_id,
    "max_new_tokens": 20,
}
response_tensor = ppo_trainer.generate([item for item in query_tensor], return_prompt=False, **generation_kwargs)
response_txt = tokenizer.decode(response_tensor[0])

# 5. define a reward for the response
# (this could be any reward such as human feedback or output from another model)
reward = [torch.tensor(1.0, device=model.pretrained_model.device)]

# 6. train the model with PPO
train_stats = ppo_trainer.step([query_tensor[0]], [response_tensor[0]], reward)
```

In general, you would run steps 3-6 in a loop over many diverse queries. You can find more realistic examples in the examples section.

## How to use a trained model

After training an `AutoModelForCausalLMWithValueHead`, you can directly use the model in `transformers`.

```python
# .. Let's assume we have a trained model using `PPOTrainer` and `AutoModelForCausalLMWithValueHead`

# push the model to the Hub
model.push_to_hub("my-fine-tuned-model-ppo")

# or save it locally
model.save_pretrained("my-fine-tuned-model-ppo")

# load the model from the Hub
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("my-fine-tuned-model-ppo")
```

You can also load your model with `AutoModelForCausalLMWithValueHead` if you want to use the value head, for example to continue training.
```python
from trl import AutoModelForCausalLMWithValueHead

model = AutoModelForCausalLMWithValueHead.from_pretrained("my-fine-tuned-model-ppo")
```
trl/docs/source/quickstart.md/0
{ "file_path": "trl/docs/source/quickstart.md", "repo_id": "trl", "token_count": 1117 }
compute_environment: LOCAL_MACHINE debug: false deepspeed_config: deepspeed_multinode_launcher: standard offload_optimizer_device: none offload_param_device: none zero3_init_flag: true zero3_save_16bit_model: true zero_stage: 3 distributed_type: DEEPSPEED downcast_bf16: 'no' machine_rank: 0 main_training_function: main mixed_precision: bf16 num_machines: 1 num_processes: 8 rdzv_backend: static same_network: true tpu_env: [] tpu_use_cluster: false tpu_use_sudo: false use_cpu: false
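# Illustrative usage (added as a sketch; the command mirrors the invocation shown in the docstring of
# examples/scripts/ppo/ppo.py and assumes the repository root as the working directory):
#   accelerate launch --config_file examples/accelerate_configs/deepspeed_zero3.yaml \
#       examples/scripts/ppo/ppo.py \
#       --dataset_name trl-internal-testing/descriptiveness-sentiment-trl-style \
#       --output_dir models/minimal/ppo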
trl/examples/accelerate_configs/deepspeed_zero3.yaml/0
{ "file_path": "trl/examples/accelerate_configs/deepspeed_zero3.yaml", "repo_id": "trl", "token_count": 193 }
<jupyter_start><jupyter_text>**Best-of-n sampling as an alternative to RLHF**This notebook compares reward-model scores of prompt based responses from 1. a base model (`gpt2-imdb`)2. `RLHF` tuned model based on this base-model 3. the base-model again from which we sample n responses to each prompt, score them and take the best scored one AKA the `best-of-n sampled` modelImport dependencies<jupyter_code>%pip install transformers trl import torch import pandas as pd from transformers import pipeline, AutoTokenizer from datasets import load_dataset from trl import AutoModelForCausalLMWithValueHead from trl.core import LengthSampler device = "cuda" if torch.cuda.is_available() else "cpu"<jupyter_output><empty_output><jupyter_text>Various constants<jupyter_code>ref_model_name = "lvwerra/gpt2-imdb" model_name = "lvwerra/gpt2-imdb-pos-v2" reward_model = "lvwerra/distilbert-imdb" N_BEST_OF = 4<jupyter_output><empty_output><jupyter_text>Models and tokenizers<jupyter_code>model = AutoModelForCausalLMWithValueHead.from_pretrained(model_name) ref_model = AutoModelForCausalLMWithValueHead.from_pretrained(ref_model_name) reward_pipe = pipeline("sentiment-analysis", model=reward_model, device=device) tokenizer = AutoTokenizer.from_pretrained(ref_model_name) tokenizer.pad_token = tokenizer.eos_token # cuda-ize models model.to(device) ref_model.to(device)<jupyter_output>/Users/kashif/Github/transformers/src/transformers/tokenization_utils_base.py:1617: FutureWarning: `clean_up_tokenization_spaces` was not set. It will be set to `True` by default. This behavior will be deprecated in transformers v4.45, and will be then set to `False` by default. For more details check this issue: https://github.com/huggingface/transformers/issues/31884 warnings.warn(<jupyter_text>Dataset building<jupyter_code>def build_dataset( tokenizer, dataset_name="stanfordnlp/imdb", input_min_text_length=2, input_max_text_length=8, ): # load imdb with datasets ds = load_dataset(dataset_name, split="train") ds = ds.rename_columns({"text": "review"}) ds = ds.filter(lambda x: len(x["review"]) > 200, batched=False) input_size = LengthSampler(input_min_text_length, input_max_text_length) def tokenize(sample): sample["input_ids"] = tokenizer.encode(sample["review"])[: input_size()] sample["query"] = tokenizer.decode(sample["input_ids"]) return sample ds = ds.map(tokenize, batched=False) ds.set_format(type="torch") return ds dataset = build_dataset(tokenizer) gen_kwargs = { "min_length": -1, "top_k": 0.0, "top_p": 1.0, "do_sample": True, "pad_token_id": tokenizer.eos_token_id, } sent_kwargs = {"top_k": None, "function_to_apply": "none", "batch_size": 16} output_min_length = 4 output_max_length = 16 output_length_sampler = LengthSampler(output_min_length, output_max_length) #### get a batch from the dataset bs = 16 output_data = dict() dataset.set_format("pandas") df_batch = dataset[:].sample(bs) output_data["query"] = df_batch["query"].tolist() query_tensors = df_batch["input_ids"].tolist() # :: [Resp] response_tensors_ref, response_tensors = [], [] # :: [[Resp]] response_tensors_best_of = []<jupyter_output><empty_output><jupyter_text>Generation using various models<jupyter_code>for i in range(bs): gen_len = output_length_sampler() query = torch.tensor(query_tensors[i]) output = ref_model.generate( query.unsqueeze(dim=0).to(device), max_new_tokens=gen_len, **gen_kwargs ).squeeze() response_tensors_ref.append(tokenizer.decode(output)) output = model.generate( query.unsqueeze(dim=0).to(device), max_new_tokens=gen_len, **gen_kwargs ).squeeze() 
response_tensors.append(tokenizer.decode(output)) # generating copies of the same query for the Best-of-n sampling queries = query.repeat((N_BEST_OF, 1)) output = ref_model.generate( queries.to(device), max_new_tokens=gen_len, **gen_kwargs ).squeeze() response_tensors_best_of.append(tokenizer.batch_decode(output))<jupyter_output>The attention mask is not set and cannot be inferred from input because pad token is same as eos token. As a consequence, you may observe unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results.<jupyter_text>Scoring<jupyter_code>scores_ref = [ output[0]["score"] for output in reward_pipe(response_tensors_ref, **sent_kwargs) ] scores = [output[0]["score"] for output in reward_pipe(response_tensors, **sent_kwargs)] scores_best_of = [] for i, response in enumerate(response_tensors_best_of): # base_score = scores_ref[i] scores_best_of.append( torch.tensor( [output[0]["score"] for output in reward_pipe(response, **sent_kwargs)] ) ) output_data["response (ref)"] = response_tensors_ref output_data["scores (ref)"] = scores_ref output_data["response (RLHF)"] = response_tensors output_data["scores (RLHF)"] = scores output_data["response (best_of)"] = [ response_tensors_best_of[i][a.argmax().item()] for i, a in enumerate(scores_best_of) ] output_data["scores (best_of)"] = [a.max().item() for a in scores_best_of] # store results in a dataframe df_results = pd.DataFrame(output_data) df_results<jupyter_output><empty_output>
trl/examples/notebooks/best_of_n.ipynb/0
{ "file_path": "trl/examples/notebooks/best_of_n.ipynb", "repo_id": "trl", "token_count": 1998 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import shutil import torch from accelerate import PartialState from datasets import load_dataset from transformers import ( AutoModelForCausalLM, AutoModelForSequenceClassification, AutoTokenizer, HfArgumentParser, ) from trl import ( ModelConfig, PPOConfig, PPOTrainer, ScriptArguments, get_kbit_device_map, get_peft_config, get_quantization_config, ) from trl.trainer.utils import SIMPLE_CHAT_TEMPLATE """ python -i examples/scripts/ppo/ppo.py \ --dataset_name trl-internal-testing/descriptiveness-sentiment-trl-style \ --dataset_train_split descriptiveness \ --learning_rate 3e-6 \ --output_dir models/minimal/ppo \ --per_device_train_batch_size 64 \ --gradient_accumulation_steps 1 \ --total_episodes 10000 \ --model_name_or_path EleutherAI/pythia-1b-deduped \ --missing_eos_penalty 1.0 accelerate launch --config_file examples/accelerate_configs/deepspeed_zero3.yaml \ examples/scripts/ppo/ppo.py \ --dataset_name trl-internal-testing/descriptiveness-sentiment-trl-style \ --dataset_train_split descriptiveness \ --output_dir models/minimal/ppo \ --num_ppo_epochs 1 \ --num_mini_batches 1 \ --learning_rate 3e-6 \ --per_device_train_batch_size 1 \ --gradient_accumulation_steps 16 \ --total_episodes 10000 \ --model_name_or_path EleutherAI/pythia-1b-deduped \ --sft_model_path EleutherAI/pythia-1b-deduped \ --reward_model_path EleutherAI/pythia-1b-deduped \ --local_rollout_forward_batch_size 1 \ --missing_eos_penalty 1.0 """ if __name__ == "__main__": parser = HfArgumentParser((ScriptArguments, PPOConfig, ModelConfig)) script_args, training_args, model_args = parser.parse_args_into_dataclasses() # remove output_dir if exists shutil.rmtree(training_args.output_dir, ignore_errors=True) ################ # Model & Tokenizer ################ torch_dtype = ( model_args.torch_dtype if model_args.torch_dtype in ["auto", None] else getattr(torch, model_args.torch_dtype) ) quantization_config = get_quantization_config(model_args) model_kwargs = dict( revision=model_args.model_revision, attn_implementation=model_args.attn_implementation, torch_dtype=torch_dtype, device_map=get_kbit_device_map() if quantization_config is not None else None, quantization_config=quantization_config, ) tokenizer = AutoTokenizer.from_pretrained( model_args.model_name_or_path, padding_side="left", trust_remote_code=model_args.trust_remote_code ) tokenizer.add_special_tokens({"pad_token": "[PAD]"}) if tokenizer.chat_template is None: tokenizer.chat_template = SIMPLE_CHAT_TEMPLATE value_model = AutoModelForSequenceClassification.from_pretrained( training_args.reward_model_path, trust_remote_code=model_args.trust_remote_code, num_labels=1 ) reward_model = AutoModelForSequenceClassification.from_pretrained( training_args.reward_model_path, trust_remote_code=model_args.trust_remote_code, num_labels=1 ) policy = AutoModelForCausalLM.from_pretrained( training_args.sft_model_path, trust_remote_code=model_args.trust_remote_code ) peft_config = 
get_peft_config(model_args) if peft_config is None: ref_policy = AutoModelForCausalLM.from_pretrained( training_args.sft_model_path, trust_remote_code=model_args.trust_remote_code ) else: ref_policy = None ################ # Dataset ################ dataset = load_dataset( script_args.dataset_name, name=script_args.dataset_config, split=script_args.dataset_train_split ) eval_samples = 100 train_dataset = dataset.select(range(len(dataset) - eval_samples)) eval_dataset = dataset.select(range(len(dataset) - eval_samples, len(dataset))) dataset_text_field = "prompt" def prepare_dataset(dataset, tokenizer): """pre-tokenize the dataset before training; only collate during training""" def tokenize(element): outputs = tokenizer( element[dataset_text_field], padding=False, ) return {"input_ids": outputs["input_ids"]} return dataset.map( tokenize, batched=True, remove_columns=dataset.column_names, num_proc=training_args.dataset_num_proc, ) # Compute that only on the main process for faster data processing. # see: https://github.com/huggingface/trl/pull/1255 with PartialState().local_main_process_first(): train_dataset = prepare_dataset(train_dataset, tokenizer) eval_dataset = prepare_dataset(eval_dataset, tokenizer) ################ # Training ################ trainer = PPOTrainer( args=training_args, processing_class=tokenizer, model=policy, ref_model=ref_policy, reward_model=reward_model, value_model=value_model, train_dataset=train_dataset, eval_dataset=eval_dataset, peft_config=peft_config, ) trainer.train() # Save and push to hub trainer.save_model(training_args.output_dir) if training_args.push_to_hub: trainer.push_to_hub(dataset_name=script_args.dataset_name) trainer.generate_completions()
trl/examples/scripts/ppo/ppo.py/0
{ "file_path": "trl/examples/scripts/ppo/ppo.py", "repo_id": "trl", "token_count": 2397 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import logging import os from datetime import date from tabulate import tabulate MAX_LEN_MESSAGE = 2900 # slack endpoint has a limit of 3001 characters parser = argparse.ArgumentParser() parser.add_argument("--slack_channel_name", default="trl-push-examples-ci") parser.add_argument("--text_file_name", required=True) def main(text_file_name, slack_channel_name=None): logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) message = "" if os.path.isfile(text_file_name): final_results = {} try: with open(text_file_name) as file: for line in file: result, config_name = line.strip().split(",") config_name = config_name.split("/")[-1].split(".yaml")[0] final_results[config_name] = int(result) except Exception as e: logger.error(f"Error reading file {text_file_name}: {str(e)}") final_results = {} no_error_payload = { "type": "section", "text": { "type": "plain_text", "text": "🌞 There were no failures on the example tests!" if not len(final_results) == 0 else "Something went wrong there is at least one empty file - please check GH action results.", "emoji": True, }, } total_num_failed = sum(final_results.values()) else: no_error_payload = { "type": "section", "text": { "type": "plain_text", "text": "❌ Something is wrong with the workflow please check ASAP!" "Something went wrong there is no text file being produced. Please check ASAP.", "emoji": True, }, } total_num_failed = 0 test_type_name = text_file_name.replace(".txt", "").replace("temp_results_", "").replace("_", " ").title() payload = [ { "type": "header", "text": { "type": "plain_text", "text": "🤗 Results of the {} TRL {} example tests.".format( os.environ.get("TEST_TYPE", ""), test_type_name ), }, }, ] if total_num_failed > 0: message += f"{total_num_failed} failed tests for example tests!" for test_name, failed in final_results.items(): failed_table = tabulate( [[test_name, "✅" if not failed else "❌"]], headers=["Test Name", "Status"], showindex="always", tablefmt="grid", maxcolwidths=[12], ) message += "\n```\n" + failed_table + "\n```" print(f"### {message}") else: payload.append(no_error_payload) if os.environ.get("TEST_TYPE", "") != "": try: from slack_sdk import WebClient except ImportError: logger.error("slack_sdk is not installed. Please install it to use Slack integration.") return if len(message) > MAX_LEN_MESSAGE: print(f"Truncating long message from {len(message)} to {MAX_LEN_MESSAGE}") message = message[:MAX_LEN_MESSAGE] + "..." 
if len(message) != 0: md_report = { "type": "section", "text": {"type": "mrkdwn", "text": message}, } payload.append(md_report) action_button = { "type": "section", "text": {"type": "mrkdwn", "text": "*For more details:*"}, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": f"https://github.com/huggingface/trl/actions/runs/{os.environ['GITHUB_RUN_ID']}", }, } payload.append(action_button) date_report = { "type": "context", "elements": [ { "type": "plain_text", "text": f"On Push - main {os.environ.get('TEST_TYPE')} test results for {date.today()}", }, ], } payload.append(date_report) print(payload) try: client = WebClient(token=os.environ.get("SLACK_API_TOKEN")) response = client.chat_postMessage(channel=f"#{slack_channel_name}", text=message, blocks=payload) if response["ok"]: logger.info("Message sent successfully to Slack.") else: logger.error(f"Failed to send message to Slack: {response['error']}") except Exception as e: logger.error(f"Error sending message to Slack: {str(e)}") if __name__ == "__main__": args = parser.parse_args() main(args.text_file_name, args.slack_channel_name)
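# Illustrative invocation (added as a sketch; the results file name is a placeholder and the flags come from
# the argparse definition above; SLACK_API_TOKEN, TEST_TYPE, and GITHUB_RUN_ID are read from the environment):
#
#   python scripts/log_example_reports.py \
#       --text_file_name temp_results_sft_tests.txt \
#       --slack_channel_name trl-push-examples-ci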
trl/scripts/log_example_reports.py/0
{ "file_path": "trl/scripts/log_example_reports.py", "repo_id": "trl", "token_count": 2641 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import torch from trl.core import masked_mean, masked_var, masked_whiten class CoreTester(unittest.TestCase): """ A wrapper class for testing core utils functions """ def setUp(self): self.test_input = torch.Tensor([1, 2, 3, 4]) self.test_mask = torch.Tensor([0, 1, 1, 0]) self.test_input_unmasked = self.test_input[1:3] def test_masked_mean(self): self.assertEqual(torch.mean(self.test_input_unmasked), masked_mean(self.test_input, self.test_mask)) def test_masked_var(self): self.assertEqual(torch.var(self.test_input_unmasked), masked_var(self.test_input, self.test_mask)) def test_masked_whiten(self): def whiten(values: torch.Tensor) -> torch.Tensor: mean, var = torch.mean(values), torch.var(values) return (values - mean) * torch.rsqrt(var + 1e-8) whiten_unmasked = whiten(self.test_input_unmasked) whiten_masked = masked_whiten(self.test_input, self.test_mask)[1:3] diffs = (whiten_unmasked - whiten_masked).sum() self.assertLess(abs(diffs.item()), 0.00001)
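# Illustrative invocation (added as a sketch; assumes pytest is installed and the working directory is the
# trl repository root):
#
#   python -m pytest tests/test_core.py -q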
trl/tests/test_core.py/0
{ "file_path": "trl/tests/test_core.py", "repo_id": "trl", "token_count": 653 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest from datasets import load_dataset from parameterized import parameterized from transformers import AutoModelForCausalLM, AutoModelForSequenceClassification, AutoTokenizer from transformers.testing_utils import require_peft, require_torch_accelerator from transformers.utils import is_peft_available from trl import OnlineDPOConfig, OnlineDPOTrainer, is_llm_blender_available, is_vllm_available from .testing_utils import RandomPairwiseJudge if is_peft_available(): from peft import LoraConfig, get_peft_model class TestOnlineDPOTrainer(unittest.TestCase): def setUp(self): self.model_id = "trl-internal-testing/tiny-Qwen2ForCausalLM-2.5" self.model = AutoModelForCausalLM.from_pretrained(self.model_id) self.ref_model = AutoModelForCausalLM.from_pretrained(self.model_id) self.tokenizer = AutoTokenizer.from_pretrained(self.model_id) self.tokenizer.pad_token = self.tokenizer.eos_token self.reward_model_id = "trl-internal-testing/tiny-LlamaForCausalLM-3.2" self.reward_model = AutoModelForSequenceClassification.from_pretrained(self.reward_model_id, num_labels=1) self.reward_tokenizer = AutoTokenizer.from_pretrained(self.reward_model_id) self.reward_tokenizer.pad_token = self.reward_tokenizer.eos_token @parameterized.expand([("standard_prompt_only",), ("conversational_prompt_only",)]) def test_training(self, config_name): with tempfile.TemporaryDirectory() as tmp_dir: training_args = OnlineDPOConfig( output_dir=tmp_dir, per_device_train_batch_size=2, max_steps=3, learning_rate=5.0e-7, eval_strategy="steps", report_to="none", ) dummy_dataset = load_dataset("trl-internal-testing/zen", config_name) trainer = OnlineDPOTrainer( model=self.model, reward_model=self.reward_model, args=training_args, train_dataset=dummy_dataset["train"], eval_dataset=dummy_dataset["test"], processing_class=self.tokenizer, reward_processing_class=self.reward_tokenizer, ) trainer.train() # Check if training loss is available self.assertIn("train_loss", trainer.state.log_history[-1]) def test_training_with_ref_model(self): with tempfile.TemporaryDirectory() as tmp_dir: training_args = OnlineDPOConfig( output_dir=tmp_dir, per_device_train_batch_size=2, max_steps=3, learning_rate=5.0e-7, eval_strategy="steps", report_to="none", ) dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_prompt_only") trainer = OnlineDPOTrainer( model=self.model, ref_model=self.ref_model, reward_model=self.reward_model, args=training_args, train_dataset=dummy_dataset["train"], eval_dataset=dummy_dataset["test"], processing_class=self.tokenizer, reward_processing_class=self.reward_tokenizer, ) trainer.train() # Check if training loss is available self.assertIn("train_loss", trainer.state.log_history[-1]) def test_ref_model_is_model(self): with tempfile.TemporaryDirectory() as tmp_dir: training_args = OnlineDPOConfig( output_dir=tmp_dir, per_device_train_batch_size=2, max_steps=3, report_to="none", ) dummy_dataset = 
load_dataset("trl-internal-testing/zen", "standard_prompt_only") with self.assertRaises(ValueError): OnlineDPOTrainer( model=self.model, ref_model=self.model, # ref_model can't be the same as model reward_model=self.reward_model, args=training_args, train_dataset=dummy_dataset["train"], processing_class=self.tokenizer, reward_processing_class=self.reward_tokenizer, ) @require_peft def test_training_with_peft(self): lora_config = LoraConfig(r=16, lora_alpha=32, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM") with tempfile.TemporaryDirectory() as tmp_dir: training_args = OnlineDPOConfig( output_dir=tmp_dir, per_device_train_batch_size=2, max_steps=3, learning_rate=5.0e-7, eval_strategy="steps", report_to="none", ) dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_prompt_only") trainer = OnlineDPOTrainer( model=self.model, reward_model=self.reward_model, args=training_args, train_dataset=dummy_dataset["train"], eval_dataset=dummy_dataset["test"], processing_class=self.tokenizer, reward_processing_class=self.reward_tokenizer, peft_config=lora_config, ) trainer.train() # Check if training loss is available self.assertIn("train_loss", trainer.state.log_history[-1]) @require_peft def test_training_with_peft_and_ref_model(self): lora_config = LoraConfig(r=16, lora_alpha=32, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM") with tempfile.TemporaryDirectory() as tmp_dir: training_args = OnlineDPOConfig( output_dir=tmp_dir, per_device_train_batch_size=2, max_steps=3, learning_rate=5.0e-7, eval_strategy="steps", report_to="none", ) dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_prompt_only") trainer = OnlineDPOTrainer( model=self.model, ref_model=self.ref_model, reward_model=self.reward_model, args=training_args, train_dataset=dummy_dataset["train"], eval_dataset=dummy_dataset["test"], processing_class=self.tokenizer, reward_processing_class=self.reward_tokenizer, peft_config=lora_config, ) trainer.train() # Check if training loss is available self.assertIn("train_loss", trainer.state.log_history[-1]) @require_peft def test_training_with_peft_model_and_peft_config(self): model_lora_config = LoraConfig(r=8, lora_alpha=16, lora_dropout=0.1, bias="none", task_type="CAUSAL_LM") model = get_peft_model(self.model, model_lora_config) # we want only the "train adapter" to be trained lora_train_config = LoraConfig(r=16, lora_alpha=32, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM") with tempfile.TemporaryDirectory() as tmp_dir: training_args = OnlineDPOConfig( output_dir=tmp_dir, per_device_train_batch_size=2, max_steps=3, learning_rate=5.0e-7, eval_strategy="steps", report_to="none", ) dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_prompt_only") trainer = OnlineDPOTrainer( model=model, reward_model=self.reward_model, args=training_args, train_dataset=dummy_dataset["train"], eval_dataset=dummy_dataset["test"], processing_class=self.tokenizer, reward_processing_class=self.reward_tokenizer, peft_config=lora_train_config, ) trainer.train() # Check if training loss is available self.assertIn("train_loss", trainer.state.log_history[-1]) @unittest.skipIf(not is_llm_blender_available(), "llm-blender is not available") @parameterized.expand([("standard_prompt_only",), ("conversational_prompt_only",)]) def test_training_with_judge(self, config_name): with tempfile.TemporaryDirectory() as tmp_dir: training_args = OnlineDPOConfig( output_dir=tmp_dir, per_device_train_batch_size=2, max_steps=3, learning_rate=5.0e-7, eval_strategy="steps", 
report_to="none", ) dummy_dataset = load_dataset("trl-internal-testing/zen", config_name) trainer = OnlineDPOTrainer( model=self.model, judge=RandomPairwiseJudge(), args=training_args, train_dataset=dummy_dataset["train"], eval_dataset=dummy_dataset["test"], processing_class=self.tokenizer, ) trainer.train() # Check if training loss is available self.assertIn("train_loss", trainer.state.log_history[-1]) @unittest.skipIf(not is_vllm_available(), "vllm is not available") @parameterized.expand([("standard_prompt_only",), ("conversational_prompt_only",)]) @require_torch_accelerator def test_training_with_vllm(self, config_name): model_id = "trl-internal-testing/small-Qwen2ForCausalLM-2.5" # We neeed a bigger model model = AutoModelForCausalLM.from_pretrained(model_id) tokenizer = AutoTokenizer.from_pretrained(model_id) tokenizer.pad_token = tokenizer.eos_token with tempfile.TemporaryDirectory() as tmp_dir: training_args = OnlineDPOConfig( output_dir=tmp_dir, use_vllm=True, report_to="none", ) dummy_dataset = load_dataset("trl-internal-testing/zen", config_name) trainer = OnlineDPOTrainer( model=model, reward_model=self.reward_model, args=training_args, train_dataset=dummy_dataset["train"], processing_class=tokenizer, reward_processing_class=self.reward_tokenizer, ) trainer.train() # Check if training loss is available self.assertIn("train_loss", trainer.state.log_history[-1])
trl/tests/test_online_dpo_trainer.py/0
{ "file_path": "trl/tests/test_online_dpo_trainer.py", "repo_id": "trl", "token_count": 5519 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import warnings from collections.abc import Mapping from contextlib import contextmanager from typing import Optional, Union import numpy as np import torch from transformers import is_torch_npu_available, is_torch_xpu_available def flatten_dict(nested: dict, sep: str = "/") -> dict: """Flatten dictionary and concatenate nested keys with separator.""" def recurse(nest: dict, prefix: str, into: dict) -> None: for k, v in nest.items(): if sep in k: raise ValueError(f"separator '{sep}' not allowed to be in key '{k}'") if isinstance(v, Mapping): recurse(v, prefix + k + sep, into) else: into[prefix + k] = v flat = {} recurse(nested, "", flat) return flat def masked_mean(values: torch.Tensor, mask: torch.Tensor, axis: Optional[bool] = None) -> torch.Tensor: """Compute mean of tensor with a masked values.""" if axis is not None: return (values * mask).sum(axis=axis) / mask.sum(axis=axis) else: return (values * mask).sum() / mask.sum() def masked_var(values: torch.Tensor, mask: torch.Tensor, unbiased: bool = True) -> torch.Tensor: """Compute variance of tensor with masked values.""" mean = masked_mean(values, mask) centered_values = values - mean variance = masked_mean(centered_values**2, mask) if unbiased: mask_sum = mask.sum() if mask_sum == 0: raise ValueError( "The sum of the mask is zero, which can happen when `mini_batch_size=1`;" "try increase the `mini_batch_size` or `gradient_accumulation_steps`" ) # note that if mask_sum == 1, then there is a division by zero issue # to avoid it you just need to use a larger minibatch_size bessel_correction = mask_sum / (mask_sum - 1) variance = variance * bessel_correction return variance def masked_whiten(values: torch.Tensor, mask: torch.Tensor, shift_mean: bool = True) -> torch.Tensor: """Whiten values with masked values.""" mean, var = masked_mean(values, mask), masked_var(values, mask) whitened = (values - mean) * torch.rsqrt(var + 1e-8) if not shift_mean: whitened += mean return whitened class LengthSampler: """ Samples a length """ def __init__(self, min_value: int, max_value: int): self.values = list(range(min_value, max_value)) def __call__(self) -> int: return np.random.choice(self.values) class PPODecorators: optimize_device_cache = False @classmethod @contextmanager def empty_device_cache(cls): yield if cls.optimize_device_cache: if is_torch_xpu_available(): gc.collect() torch.xpu.empty_cache() gc.collect() elif is_torch_npu_available(): gc.collect() torch.npu.empty_cache() gc.collect() elif torch.cuda.is_available(): gc.collect() torch.cuda.empty_cache() gc.collect() def randn_tensor( shape: Union[tuple, list], generator: Optional[Union[list[torch.Generator], torch.Generator]] = None, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None, layout: Optional[torch.layout] = None, ) -> torch.Tensor: """A helper function to create random tensors on the desired `device` with the desired `dtype`. 
    When passing a list of generators, you can seed each batch element individually. If CPU generators are passed, the
    tensor is always created on the CPU.
    """
    # device on which tensor is created defaults to device
    rand_device = device
    batch_size = shape[0]

    layout = layout or torch.strided
    device = device or torch.device("cpu")

    if generator is not None:
        gen_device_type = generator.device.type if not isinstance(generator, list) else generator[0].device.type
        if gen_device_type != device.type and gen_device_type == "cpu":
            rand_device = "cpu"
            if device != "mps":
                warnings.warn(
                    f"The passed generator was created on 'cpu' even though a tensor on {device} was expected."
                    f" Tensors will be created on 'cpu' and then moved to {device}. Note that one can probably"
                    f" slightly speed up this function by passing a generator that was created on the {device} device.",
                    UserWarning,
                )
        elif gen_device_type != device.type and gen_device_type == "cuda":
            raise ValueError(f"Cannot generate a {device} tensor from a generator of type {gen_device_type}.")

    # make sure a generator list of length 1 is treated like a non-list
    if isinstance(generator, list) and len(generator) == 1:
        generator = generator[0]

    if isinstance(generator, list):
        shape = (1,) + shape[1:]
        latents = [
            torch.randn(shape, generator=generator[i], device=rand_device, dtype=dtype, layout=layout)
            for i in range(batch_size)
        ]
        latents = torch.cat(latents, dim=0).to(device)
    else:
        latents = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype, layout=layout).to(device)

    return latents
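# Illustrative usage of the helpers defined above (added as a sketch; guarded so it only runs when the module
# is executed directly, never on import):
if __name__ == "__main__":
    print(flatten_dict({"ppo": {"loss": 0.1, "kl": 0.02}}))  # {'ppo/loss': 0.1, 'ppo/kl': 0.02}

    values = torch.tensor([1.0, 2.0, 3.0, 4.0])
    mask = torch.tensor([0.0, 1.0, 1.0, 0.0])
    print(masked_mean(values, mask))  # tensor(2.5000): only the masked-in entries 2.0 and 3.0 contribute

    length_sampler = LengthSampler(4, 16)
    print(length_sampler())  # a random integer in [4, 16)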
trl/trl/core.py/0
{ "file_path": "trl/trl/core.py", "repo_id": "trl", "token_count": 2373 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import os import random import textwrap import warnings from collections import defaultdict from contextlib import nullcontext from typing import Any, Callable, Literal, Optional, Union import numpy as np import pandas as pd import torch import torch.amp as amp import torch.nn as nn import torch.nn.functional as F import transformers from accelerate import PartialState from datasets import Dataset from packaging import version from torch.utils.data import DataLoader from transformers import ( AutoModelForCausalLM, BaseImageProcessor, DataCollator, FeatureExtractionMixin, PreTrainedModel, PreTrainedTokenizerBase, ProcessorMixin, Trainer, is_comet_available, is_wandb_available, ) from transformers.trainer_callback import TrainerCallback from transformers.trainer_utils import EvalLoopOutput from transformers.utils import is_peft_available, is_torch_fx_proxy from ..data_utils import maybe_apply_chat_template, maybe_extract_prompt from .cpo_config import CPOConfig from .utils import ( DPODataCollatorWithPadding, add_bos_token_if_needed, add_eos_token_if_needed, disable_dropout_in_model, generate_model_card, get_comet_experiment_url, log_table_to_comet_experiment, pad_to_length, peft_module_casting_to_bf16, selective_log_softmax, ) if is_peft_available(): from peft import PeftModel, get_peft_model, prepare_model_for_kbit_training if is_wandb_available(): import wandb class CPOTrainer(Trainer): r""" Initialize CPOTrainer. Args: model (`transformers.PreTrainedModel`): The model to train, preferably an `AutoModelForSequenceClassification`. args (`CPOConfig`): The CPO config arguments to use for training. data_collator (`transformers.DataCollator`): The data collator to use for training. If None is specified, the default data collator (`DPODataCollatorWithPadding`) will be used which will pad the sequences to the maximum length of the sequences in the batch, given a dataset of paired sequences. train_dataset (`datasets.Dataset`): The dataset to use for training. eval_dataset (`datasets.Dataset`): The dataset to use for evaluation. processing_class (`PreTrainedTokenizerBase` or `BaseImageProcessor` or `FeatureExtractionMixin` or `ProcessorMixin`, *optional*): Processing class used to process the data. If provided, will be used to automatically process the inputs for the model, and it will be saved along the model to make it easier to rerun an interrupted training or reuse the fine-tuned model. model_init (`Callable[[], transformers.PreTrainedModel]`): The model initializer to use for training. If None is specified, the default model initializer will be used. callbacks (`list[transformers.TrainerCallback]`): The callbacks to use for training. optimizers (`tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`): The optimizer and scheduler to use for training. 
preprocess_logits_for_metrics (`Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`): The function to use to preprocess the logits before computing the metrics. peft_config (`dict`, defaults to `None`): The PEFT configuration to use for training. If you pass a PEFT configuration, the model will be wrapped in a PEFT model. compute_metrics (`Callable[[EvalPrediction], dict]`, *optional*): The function to use to compute the metrics. Must take a `EvalPrediction` and return a dictionary string to metric values. """ _tag_names = ["trl", "cpo"] def __init__( self, model: Optional[Union[PreTrainedModel, nn.Module, str]] = None, args: Optional[CPOConfig] = None, data_collator: Optional[DataCollator] = None, train_dataset: Optional[Dataset] = None, eval_dataset: Optional[Union[Dataset, dict[str, Dataset]]] = None, processing_class: Optional[ Union[PreTrainedTokenizerBase, BaseImageProcessor, FeatureExtractionMixin, ProcessorMixin] ] = None, model_init: Optional[Callable[[], PreTrainedModel]] = None, callbacks: Optional[list[TrainerCallback]] = None, optimizers: tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None), preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None, peft_config: Optional[dict] = None, compute_metrics: Optional[Callable[[EvalLoopOutput], dict]] = None, ): if args.model_init_kwargs is None: model_init_kwargs = {} elif not isinstance(model, str): raise ValueError("You passed model_kwargs to the CPOTrainer. But your model is already instantiated.") else: model_init_kwargs = args.model_init_kwargs torch_dtype = model_init_kwargs.get("torch_dtype") if torch_dtype is not None: # Convert to `torch.dtype` if an str is passed if isinstance(torch_dtype, str) and torch_dtype != "auto": torch_dtype = getattr(torch, torch_dtype) if torch_dtype != "auto" and not isinstance(torch_dtype, torch.dtype): raise ValueError( f"Invalid `torch_dtype` passed to the CPOConfig. Expected a string with either `torch.dtype` or 'auto', but got {torch_dtype}." ) model_init_kwargs["torch_dtype"] = torch_dtype if isinstance(model, str): model = AutoModelForCausalLM.from_pretrained(model, **model_init_kwargs) # Initialize this variable to False. This helps tracking the case when `peft_module_casting_to_bf16` # has been called in order to properly call autocast if needed. 
self._peft_has_been_casted_to_bf16 = False if not is_peft_available() and peft_config is not None: raise ValueError( "PEFT is not installed and you passed a `peft_config` in the trainer's kwargs, please install it to use the PEFT models" ) elif is_peft_available() and peft_config is not None: # if model is a peft model and we have a peft_config, we merge and unload it first if isinstance(model, PeftModel): model = model.merge_and_unload() if getattr(model, "is_loaded_in_8bit", False) or getattr(model, "is_loaded_in_4bit", False): _support_gc_kwargs = hasattr( args, "gradient_checkpointing_kwargs" ) and "gradient_checkpointing_kwargs" in list( inspect.signature(prepare_model_for_kbit_training).parameters ) prepare_model_kwargs = {"use_gradient_checkpointing": args.gradient_checkpointing} if _support_gc_kwargs: prepare_model_kwargs["gradient_checkpointing_kwargs"] = args.gradient_checkpointing_kwargs model = prepare_model_for_kbit_training(model, **prepare_model_kwargs) elif getattr(args, "gradient_checkpointing", False): # For backward compatibility with older versions of transformers if hasattr(model, "enable_input_require_grads"): model.enable_input_require_grads() else: def make_inputs_require_grad(module, input, output): output.requires_grad_(True) model.get_input_embeddings().register_forward_hook(make_inputs_require_grad) # get peft model with the given config model = get_peft_model(model, peft_config) if args.bf16 and getattr(model, "is_loaded_in_4bit", False): peft_module_casting_to_bf16(model) # If args.bf16 we need to explicitly call `generate` with torch amp autocast context manager self._peft_has_been_casted_to_bf16 = True # For models that use gradient_checkpointing, we need to attach a hook that enables input # to explicitly have `requires_grad=True`, otherwise training will either silently # fail or completely fail. elif getattr(args, "gradient_checkpointing", False): # For backward compatibility with older versions of transformers if hasattr(model, "enable_input_require_grads"): model.enable_input_require_grads() else: def make_inputs_require_grad(module, input, output): output.requires_grad_(True) model.get_input_embeddings().register_forward_hook(make_inputs_require_grad) if args.generate_during_eval and not (is_wandb_available() or is_comet_available()): raise ValueError( "`generate_during_eval=True` requires Weights and Biases or Comet to be installed." " Please install `wandb` or `comet-ml` to resolve." 
) if model is not None: self.is_encoder_decoder = model.config.is_encoder_decoder elif args.is_encoder_decoder is None: raise ValueError("When no model is provided, you need to pass the parameter is_encoder_decoder.") else: self.is_encoder_decoder = args.is_encoder_decoder if self.is_encoder_decoder: self.decoder_start_token_id = model.config.decoder_start_token_id self.pad_token_id = model.config.pad_token_id if processing_class is None: raise ValueError("processing_class must be specified to tokenize a CPO dataset.") if args.max_length is None: warnings.warn( "`max_length` is not set in the CPOConfig's init" " it will default to `512` by default, but you should do it yourself in the future.", UserWarning, ) max_length = 512 else: max_length = args.max_length if args.max_prompt_length is None: warnings.warn( "`max_prompt_length` is not set in the CPOConfig's init" " it will default to `128` by default, but you should do it yourself in the future.", UserWarning, ) max_prompt_length = 128 else: max_prompt_length = args.max_prompt_length if args.max_completion_length is None and self.is_encoder_decoder: warnings.warn( "When using an encoder decoder architecture, you should set `max_completion_length` in the CPOConfig's init" " it will default to `128` by default, but you should do it yourself in the future.", UserWarning, ) max_completion_length = 128 else: max_completion_length = args.max_completion_length if data_collator is None: data_collator = DPODataCollatorWithPadding( pad_token_id=processing_class.pad_token_id, label_pad_token_id=args.label_pad_token_id, is_encoder_decoder=self.is_encoder_decoder, ) if args.remove_unused_columns: args.remove_unused_columns = False # warn users warnings.warn( "When using DPODataCollatorWithPadding, you should set `remove_unused_columns=False` in your TrainingArguments" " we have set it for you, but you should do it yourself in the future.", UserWarning, ) self.use_dpo_data_collator = True else: self.use_dpo_data_collator = False # Disable dropout in the model if args.disable_dropout: disable_dropout_in_model(model) self.max_length = max_length self.generate_during_eval = args.generate_during_eval self.label_pad_token_id = args.label_pad_token_id self.padding_value = args.padding_value if args.padding_value is not None else processing_class.pad_token_id self.max_prompt_length = max_prompt_length self.truncation_mode = args.truncation_mode self.max_completion_length = max_completion_length self.processing_class = processing_class if args.loss_type in ["hinge", "ipo"] and args.label_smoothing > 0: warnings.warn( f"You are using the {args.loss_type} loss type that does not support label smoothing. The " "`label_smoothing` parameter will be ignored. Set `label_smoothing` to `0.0` to remove this warning.", UserWarning, ) if args.loss_type == "kto_pair": raise ValueError("Support for kto_pair has been removed in CPOTrainer. Please use KTOTrainer.") self.beta = args.beta self.label_smoothing = args.label_smoothing self.loss_type = args.loss_type self.cpo_alpha = args.cpo_alpha self.aux_loss_enabled = getattr(model.config, "output_router_logits", False) self.aux_loss_coef = getattr(model.config, "router_aux_loss_coef", 0.0) if self.aux_loss_enabled and self.aux_loss_coef == 0.0: warnings.warn( "You set `output_router_logits` to `True` in the model config, but `router_aux_loss_coef` is set to " "`0.0`, meaning the auxiliary loss will not be used. 
Either set `router_aux_loss_coef` to a value " "greater than `0.0`, or set `output_router_logits` to `False` if you don't want to use the auxiliary " "loss.", UserWarning, ) if args.loss_type == "simpo": self.simpo_gamma = args.simpo_gamma self._stored_metrics = defaultdict(lambda: defaultdict(list)) # The trainer estimates the number of FLOPs (floating-point operations) using the number of elements in the # input tensor associated with the key "input_ids". However, in CPO, the sampled data does not include the # "input_ids" key. Instead, the available keys are "prompt_input_ids", "chosen_input_ids", and # "rejected_input_ids". As a result, the trainer issues the warning: "Could not estimate the number of tokens # of the input, floating-point operations will not be computed." To suppress this warning, we set the # "estimate_tokens" key in the model's "warnings_issued" dictionary to True. This acts as a flag to indicate # that the warning has already been issued. model.warnings_issued["estimate_tokens"] = True # Compute that only on the main process for faster data processing. # see: https://github.com/huggingface/trl/pull/1255 with PartialState().local_main_process_first(): # Extract the prompt if needed, and apply the chat template if needed train_dataset = train_dataset.map(maybe_extract_prompt, num_proc=args.dataset_num_proc) train_dataset = train_dataset.map( maybe_apply_chat_template, fn_kwargs={"tokenizer": processing_class}, num_proc=args.dataset_num_proc ) if eval_dataset is not None: eval_dataset = eval_dataset.map(maybe_extract_prompt, num_proc=args.dataset_num_proc) eval_dataset = eval_dataset.map( maybe_apply_chat_template, fn_kwargs={"tokenizer": processing_class}, num_proc=args.dataset_num_proc, ) # tokenize the dataset train_dataset = train_dataset.map(self.tokenize_row, num_proc=args.dataset_num_proc) if eval_dataset is not None: eval_dataset = eval_dataset.map(self.tokenize_row, num_proc=args.dataset_num_proc) super().__init__( model=model, args=args, data_collator=data_collator, train_dataset=train_dataset, eval_dataset=eval_dataset, processing_class=processing_class, model_init=model_init, compute_metrics=compute_metrics, callbacks=callbacks, optimizers=optimizers, preprocess_logits_for_metrics=preprocess_logits_for_metrics, ) # Gradient accumulation requires scaled loss. Normally, loss scaling in the parent class depends on whether the # model accepts loss-related kwargs. Since we compute our own loss, this check is irrelevant. We set # self.model_accepts_loss_kwargs to False to enable scaling. self.model_accepts_loss_kwargs = False # Add tags for models that have been loaded with the correct transformers version if hasattr(self.model, "add_model_tags"): self.model.add_model_tags(self._tag_names) if not hasattr(self, "accelerator"): raise AttributeError( "Your `Trainer` does not have an `accelerator` object. Consider upgrading `transformers`." ) def build_tokenized_answer(self, prompt, answer): """ Llama tokenizer does not satisfy `enc(a + b) = enc(a) + enc(b)`. It does ensure `enc(a + b) = enc(a) + enc(a + b)[len(enc(a)):]`. 
Reference: https://github.com/EleutherAI/lm-evaluation-harness/pull/531#issuecomment-1595586257 """ full_tokenized = self.processing_class(prompt + answer, add_special_tokens=False) prompt_input_ids = self.processing_class(prompt, add_special_tokens=False)["input_ids"] answer_input_ids = full_tokenized["input_ids"][len(prompt_input_ids) :] answer_attention_mask = full_tokenized["attention_mask"][len(prompt_input_ids) :] # Concat tokens to form `enc(a) + enc(a + b)[len(enc(a)):]` full_concat_input_ids = np.concatenate([prompt_input_ids, answer_input_ids]) # Prepare input tokens for token by token comparison full_input_ids = np.array(full_tokenized["input_ids"]) if len(full_input_ids) != len(full_concat_input_ids): raise ValueError("Prompt input ids and answer input ids should have the same length.") # On some tokenizers, like Llama-2 tokenizer, there are occasions where tokens # can be merged together when tokenizing prompt+answer. This could result # on the last token from the prompt being different when tokenized on its own # vs when done as prompt+answer. response_token_ids_start_idx = len(prompt_input_ids) # If tokenized prompt is different than both prompt+answer, then it means the # last token has changed due to merging. if prompt_input_ids != full_tokenized["input_ids"][:response_token_ids_start_idx]: response_token_ids_start_idx -= 1 prompt_input_ids = full_tokenized["input_ids"][:response_token_ids_start_idx] prompt_attention_mask = full_tokenized["attention_mask"][:response_token_ids_start_idx] if len(prompt_input_ids) != len(prompt_attention_mask): raise ValueError("Prompt input ids and attention mask should have the same length.") answer_input_ids = full_tokenized["input_ids"][response_token_ids_start_idx:] answer_attention_mask = full_tokenized["attention_mask"][response_token_ids_start_idx:] return dict( prompt_input_ids=prompt_input_ids, prompt_attention_mask=prompt_attention_mask, input_ids=answer_input_ids, attention_mask=answer_attention_mask, ) def tokenize_row(self, feature, model: Optional[Union[PreTrainedModel, nn.Module]] = None) -> dict: """Tokenize a single row from a CPO specific dataset. At this stage, we don't convert to PyTorch tensors yet; we just handle the truncation in case the prompt + chosen or prompt + rejected responses is/are too long. First we truncate the prompt; if we're still too long, we truncate the chosen/rejected. We also create the labels for the chosen/rejected responses, which are of length equal to the sum of the length of the prompt and the chosen/rejected response, with label_pad_token_id for the prompt tokens. """ batch = {} prompt = feature["prompt"] chosen = feature["chosen"] rejected = feature["rejected"] if not self.is_encoder_decoder: # Check issues below for more details # 1. https://github.com/huggingface/trl/issues/907 # 2. https://github.com/EleutherAI/lm-evaluation-harness/pull/531#issuecomment-1595586257 # 3. 
https://github.com/LianjiaTech/BELLE/issues/337 if not isinstance(prompt, str): raise ValueError(f"prompt should be an str but got {type(prompt)}") prompt_tokens = self.processing_class(prompt, add_special_tokens=False) prompt_tokens = {f"prompt_{k}": v for k, v in prompt_tokens.items()} if not isinstance(chosen, str): raise ValueError(f"chosen should be an str but got {type(chosen)}") chosen_tokens = self.build_tokenized_answer(prompt, chosen) if not isinstance(rejected, str): raise ValueError(f"rejected should be an str but got {type(rejected)}") rejected_tokens = self.build_tokenized_answer(prompt, rejected) # Last prompt token might get merged by tokenizer and # it should not be included for generation if that happens prompt_len_input_ids = len(prompt_tokens["prompt_input_ids"]) chosen_prompt_len_input_ids = len(chosen_tokens["prompt_input_ids"]) rejected_prompt_len_input_ids = len(rejected_tokens["prompt_input_ids"]) prompt_len_input_ids = min(chosen_prompt_len_input_ids, rejected_prompt_len_input_ids) for k, v in prompt_tokens.items(): prompt_tokens[k] = v[:prompt_len_input_ids] # Make sure prompts only have one different token at most an # and length only differs by 1 at most num_diff_tokens = sum( [a != b for a, b in zip(chosen_tokens["prompt_input_ids"], rejected_tokens["prompt_input_ids"])] ) num_diff_len = abs(chosen_prompt_len_input_ids - rejected_prompt_len_input_ids) if num_diff_tokens > 1 or num_diff_len > 1: raise ValueError( "Chosen and rejected prompt_input_ids might only differ on the " "last token due to tokenizer merge ops." ) # add BOS token to head of prompt. Avoid adding if it's already there prompt_tokens, chosen_tokens, rejected_tokens = add_bos_token_if_needed( self.processing_class.bos_token_id, prompt_len_input_ids, prompt_tokens, chosen_prompt_len_input_ids, chosen_tokens, rejected_prompt_len_input_ids, rejected_tokens, ) # add EOS token to end of answer. 
Avoid adding if it's already there chosen_tokens, rejected_tokens = add_eos_token_if_needed( self.processing_class.eos_token_id, chosen_tokens, rejected_tokens ) longer_response_length = max(len(chosen_tokens["input_ids"]), len(rejected_tokens["input_ids"])) # if combined sequence is too long, truncate the prompt for answer_tokens in [chosen_tokens, rejected_tokens, prompt_tokens]: if len(answer_tokens["prompt_input_ids"]) + longer_response_length > self.max_length: if self.truncation_mode == "keep_start": for k in ["prompt_input_ids", "prompt_attention_mask"]: answer_tokens[k] = answer_tokens[k][: self.max_prompt_length] elif self.truncation_mode == "keep_end": for k in ["prompt_input_ids", "prompt_attention_mask"]: answer_tokens[k] = answer_tokens[k][-self.max_prompt_length :] else: raise ValueError(f"Unknown truncation mode: {self.truncation_mode}") # if that's still too long, truncate the response for answer_tokens in [chosen_tokens, rejected_tokens]: if len(answer_tokens["prompt_input_ids"]) + longer_response_length > self.max_length: for k in ["input_ids", "attention_mask"]: answer_tokens[k] = answer_tokens[k][: self.max_length - self.max_prompt_length] # Create labels chosen_sequence_tokens = { k: chosen_tokens[f"prompt_{k}"] + chosen_tokens[k] for k in ["input_ids", "attention_mask"] } rejected_sequence_tokens = { k: rejected_tokens[f"prompt_{k}"] + rejected_tokens[k] for k in ["input_ids", "attention_mask"] } chosen_sequence_tokens["labels"] = chosen_sequence_tokens["input_ids"][:] chosen_sequence_tokens["labels"][: len(chosen_tokens["prompt_input_ids"])] = [ self.label_pad_token_id ] * len(chosen_tokens["prompt_input_ids"]) rejected_sequence_tokens["labels"] = rejected_sequence_tokens["input_ids"][:] rejected_sequence_tokens["labels"][: len(rejected_tokens["prompt_input_ids"])] = [ self.label_pad_token_id ] * len(rejected_tokens["prompt_input_ids"]) for k, toks in { "chosen_": chosen_sequence_tokens, "rejected_": rejected_sequence_tokens, "": prompt_tokens, }.items(): for type_key, tokens in toks.items(): if type_key == "token_type_ids": continue batch[f"{k}{type_key}"] = tokens else: chosen_tokens = self.processing_class( chosen, truncation=True, max_length=self.max_completion_length, add_special_tokens=True ) rejected_tokens = self.processing_class( rejected, truncation=True, max_length=self.max_completion_length, add_special_tokens=True ) prompt_tokens = self.processing_class( prompt, truncation=True, max_length=self.max_prompt_length, add_special_tokens=True ) batch["chosen_labels"] = chosen_tokens["input_ids"] batch["rejected_labels"] = rejected_tokens["input_ids"] batch["prompt_input_ids"] = prompt_tokens["input_ids"] batch["prompt_attention_mask"] = prompt_tokens["attention_mask"] if model is not None and hasattr(model, "prepare_decoder_input_ids_from_labels"): batch["rejected_decoder_input_ids"] = model.prepare_decoder_input_ids_from_labels( labels=torch.tensor(batch["rejected_labels"]) ) batch["chosen_decoder_input_ids"] = model.prepare_decoder_input_ids_from_labels( labels=torch.tensor(batch["chosen_labels"]) ) return batch @staticmethod def concatenated_inputs( batch: dict[str, Union[list, torch.LongTensor]], is_encoder_decoder: bool = False, label_pad_token_id: int = -100, padding_value: int = 0, device: Optional[torch.device] = None, ) -> dict[str, torch.LongTensor]: """Concatenate the chosen and rejected inputs into a single tensor. Args: batch: A batch of data. 
Must contain the keys 'chosen_input_ids' and 'rejected_input_ids', which are tensors of shape (batch_size, sequence_length). is_encoder_decoder: Whether the model is an encoder-decoder model. label_pad_token_id: The label pad token id. padding_value: The padding value to use for the concatenated inputs_ids. device: The device for the concatenated inputs. Returns: A dictionary containing the concatenated inputs under the key 'concatenated_input_ids'. """ concatenated_batch = {} if is_encoder_decoder: max_length = max(batch["chosen_labels"].shape[1], batch["rejected_labels"].shape[1]) else: max_length = max(batch["chosen_input_ids"].shape[1], batch["rejected_input_ids"].shape[1]) for k in batch: if k.startswith("chosen") and isinstance(batch[k], torch.Tensor): if "labels" in k or is_encoder_decoder: pad_value = label_pad_token_id elif k.endswith("_input_ids"): pad_value = padding_value elif k.endswith("_attention_mask"): pad_value = 0 concatenated_key = k.replace("chosen", "concatenated") concatenated_batch[concatenated_key] = pad_to_length(batch[k], max_length, pad_value=pad_value) for k in batch: if k.startswith("rejected") and isinstance(batch[k], torch.Tensor): if "labels" in k or is_encoder_decoder: pad_value = label_pad_token_id elif k.endswith("_input_ids"): pad_value = padding_value elif k.endswith("_attention_mask"): pad_value = 0 concatenated_key = k.replace("rejected", "concatenated") concatenated_batch[concatenated_key] = torch.cat( ( concatenated_batch[concatenated_key], pad_to_length(batch[k], max_length, pad_value=pad_value), ), dim=0, ).to(device=device) if is_encoder_decoder: concatenated_batch["concatenated_input_ids"] = batch["prompt_input_ids"].repeat(2, 1).to(device=device) concatenated_batch["concatenated_attention_mask"] = ( batch["prompt_attention_mask"].repeat(2, 1).to(device=device) ) return concatenated_batch def cpo_loss( self, policy_chosen_logps: torch.FloatTensor, policy_rejected_logps: torch.FloatTensor, ) -> tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]: """Compute the CPO loss for a batch of policy and reference model log probabilities. Args: policy_chosen_logps: Log probabilities of the policy model for the chosen responses. Shape: (batch_size,) policy_rejected_logps: Log probabilities of the policy model for the rejected responses. Shape: (batch_size,) Returns: A tuple of three tensors: (losses, chosen_rewards, rejected_rewards). The losses tensor contains the CPO loss for each example in the batch. The chosen_rewards and rejected_rewards tensors contain the rewards for the chosen and rejected responses, respectively. """ logits = (policy_chosen_logps - policy_rejected_logps).to(self.accelerator.device) # The beta is a temperature parameter for the CPO loss, typically something in the range of 0.1 to 0.5. # We ignore the reference model as beta -> 0. The label_smoothing parameter encodes our uncertainty about the labels and # calculates a conservative CPO loss. if self.loss_type == "simpo": gamma_logratios = self.simpo_gamma / self.beta logits = logits - gamma_logratios # This reduces to Equation 3 from the CPO paper when label_smoothing -> 0. losses = ( -F.logsigmoid(self.beta * logits) * (1 - self.label_smoothing) - F.logsigmoid(-self.beta * logits) * self.label_smoothing ) elif self.loss_type == "sigmoid": # This reduces to Equation 3 from the CPO paper when label_smoothing -> 0. 
losses = ( -F.logsigmoid(self.beta * logits) * (1 - self.label_smoothing) - F.logsigmoid(-self.beta * logits) * self.label_smoothing ) elif self.loss_type == "hinge": losses = torch.relu(1 - self.beta * logits) elif self.loss_type == "ipo": # eqn (17) of the paper where beta is the regularization parameter for the IPO loss, denoted by tau in the paper. losses = (logits - 1 / (2 * self.beta)) ** 2 else: raise ValueError( f"Unknown loss type: {self.loss_type}. Should be one of ['sigmoid', 'hinge', 'ipo', 'simpo']" ) chosen_rewards = self.beta * (policy_chosen_logps.to(self.accelerator.device)).detach() rejected_rewards = self.beta * (policy_rejected_logps.to(self.accelerator.device)).detach() return losses, chosen_rewards, rejected_rewards @staticmethod def get_batch_logps( logits: torch.FloatTensor, labels: torch.LongTensor, average_log_prob: bool = False, label_pad_token_id: int = -100, is_encoder_decoder: bool = False, ) -> torch.FloatTensor: """Compute the log probabilities of the given labels under the given logits. Args: logits: Logits of the model (unnormalized). Shape: (batch_size, sequence_length, vocab_size) labels: Labels for which to compute the log probabilities. Label tokens with a value of label_pad_token_id are ignored. Shape: (batch_size, sequence_length) average_log_prob: If True, return the average log probability per (non-masked) token. Otherwise, return the sum of the log probabilities of the (non-masked) tokens. label_pad_token_id: The label pad token id. is_encoder_decoder: Whether the model is an encoder-decoder model. Returns: A tensor of shape (batch_size,) containing the average/sum log probabilities of the given labels under the given logits. """ if logits.shape[:-1] != labels.shape: raise ValueError("Logits (batch and sequence length dim) and labels must have the same shape.") if not is_encoder_decoder: labels = labels[:, 1:].clone() logits = logits[:, :-1, :] loss_mask = labels != label_pad_token_id # dummy token; we'll ignore the losses on these tokens later labels[labels == label_pad_token_id] = 0 per_token_logps = selective_log_softmax(logits, labels) if average_log_prob: return (per_token_logps * loss_mask).sum(-1) / loss_mask.sum(-1) else: return (per_token_logps * loss_mask).sum(-1) def concatenated_forward( self, model: nn.Module, batch: dict[str, Union[list, torch.LongTensor]] ) -> tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]: """Run the given model on the given batch of inputs, concatenating the chosen and rejected inputs together. We do this to avoid doing two forward passes, because it's faster for FSDP. 
""" concatenated_batch = self.concatenated_inputs( batch, is_encoder_decoder=self.is_encoder_decoder, label_pad_token_id=self.label_pad_token_id, padding_value=self.padding_value, device=self.accelerator.device, ) len_chosen = batch["chosen_labels"].shape[0] model_kwargs = ( { "decoder_input_ids": self._shift_right(concatenated_batch["concatenated_labels"]), } if self.is_encoder_decoder else {} ) if self.aux_loss_enabled: model_kwargs["output_router_logits"] = True outputs = model( concatenated_batch["concatenated_input_ids"], attention_mask=concatenated_batch["concatenated_attention_mask"], use_cache=False, **model_kwargs, ) all_logits = outputs.logits def cross_entropy_loss(logits, labels): if not self.is_encoder_decoder: # Shift so that tokens < n predict n logits = logits[..., :-1, :].contiguous() labels = labels[..., 1:].contiguous() # Flatten the tokens loss_fct = nn.CrossEntropyLoss() logits = logits.view(-1, logits.shape[-1]) labels = labels.view(-1) # Enable model parallelism labels = labels.to(logits.device) loss = loss_fct(logits, labels) return loss labels = concatenated_batch["concatenated_labels"].clone() if self.cpo_alpha == 0: nll_loss = torch.tensor(0.0).to(self.accelerator.device) else: nll_loss = cross_entropy_loss(all_logits[:len_chosen], labels[:len_chosen]) all_logps = self.get_batch_logps( all_logits, concatenated_batch["concatenated_labels"], average_log_prob=self.loss_type in ["ipo", "simpo"], is_encoder_decoder=self.is_encoder_decoder, label_pad_token_id=self.label_pad_token_id, ) chosen_logps = all_logps[:len_chosen] rejected_logps = all_logps[len_chosen:] chosen_logits = all_logits[:len_chosen] rejected_logits = all_logits[len_chosen:] if self.aux_loss_enabled: return (chosen_logps, rejected_logps, chosen_logits, rejected_logits, nll_loss, outputs.aux_loss) return (chosen_logps, rejected_logps, chosen_logits, rejected_logits, nll_loss) def get_batch_loss_metrics( self, model, batch: dict[str, Union[list, torch.LongTensor]], train_eval: Literal["train", "eval"] = "train", ): """Compute the CPO loss and other metrics for the given batch of inputs for train or test.""" metrics = {} forward_output = self.concatenated_forward(model, batch) ( policy_chosen_logps, policy_rejected_logps, policy_chosen_logits, policy_rejected_logits, policy_nll_loss, ) = forward_output[:5] if self.aux_loss_enabled: aux_loss = forward_output[5] losses, chosen_rewards, rejected_rewards = self.cpo_loss( policy_chosen_logps, policy_rejected_logps, ) loss = losses.mean() + self.cpo_alpha * policy_nll_loss reward_accuracies = (chosen_rewards > rejected_rewards).float() prefix = "eval_" if train_eval == "eval" else "" metrics[f"{prefix}rewards/chosen"] = self.accelerator.gather_for_metrics(chosen_rewards).mean().item() metrics[f"{prefix}rewards/rejected"] = self.accelerator.gather_for_metrics(rejected_rewards).mean().item() metrics[f"{prefix}rewards/accuracies"] = self.accelerator.gather_for_metrics(reward_accuracies).mean().item() metrics[f"{prefix}rewards/margins"] = ( self.accelerator.gather_for_metrics(chosen_rewards - rejected_rewards).mean().item() ) metrics[f"{prefix}logps/rejected"] = ( self.accelerator.gather_for_metrics(policy_rejected_logps).detach().mean().item() ) metrics[f"{prefix}logps/chosen"] = ( self.accelerator.gather_for_metrics(policy_chosen_logps).detach().mean().item() ) metrics[f"{prefix}logits/rejected"] = ( self.accelerator.gather_for_metrics(policy_rejected_logits).detach().mean().item() ) metrics[f"{prefix}logits/chosen"] = ( 
self.accelerator.gather_for_metrics(policy_chosen_logits).detach().mean().item() ) metrics[f"{prefix}nll_loss"] = self.accelerator.gather_for_metrics(policy_nll_loss).detach().mean().item() if self.aux_loss_enabled: loss += self.aux_loss_coef * aux_loss return loss, metrics def compute_loss( self, model: Union[PreTrainedModel, nn.Module], inputs: dict[str, Union[torch.Tensor, Any]], return_outputs=False, num_items_in_batch=None, ) -> Union[torch.Tensor, tuple[torch.Tensor, dict[str, torch.Tensor]]]: compute_loss_context_manager = amp.autocast("cuda") if self._peft_has_been_casted_to_bf16 else nullcontext() with compute_loss_context_manager: loss, metrics = self.get_batch_loss_metrics(model, inputs, train_eval="train") # force log the metrics self.store_metrics(metrics, train_eval="train") if return_outputs: return (loss, metrics) return loss def generate_from_model(self, model, batch: dict[str, torch.LongTensor]) -> str: """Generate samples from the model and reference model for the given batch of inputs.""" # If one uses `generate_during_eval` with peft + bf16, we need to explicitly call generate with # the torch cuda amp context manager as some hidden states are silently casted to full precision. generate_context_manager = amp.autocast("cuda") if self._peft_has_been_casted_to_bf16 else nullcontext() with generate_context_manager: policy_output = model.generate( input_ids=batch["prompt_input_ids"], attention_mask=batch["prompt_attention_mask"], max_length=self.max_length, do_sample=True, pad_token_id=self.processing_class.pad_token_id, ) policy_output = pad_to_length(policy_output, self.max_length, self.processing_class.pad_token_id) policy_output_decoded = self.processing_class.batch_decode(policy_output, skip_special_tokens=True) return policy_output_decoded def prediction_step( self, model: Union[PreTrainedModel, nn.Module], inputs: dict[str, Union[torch.Tensor, Any]], prediction_loss_only: bool, ignore_keys: Optional[list[str]] = None, ): if ignore_keys is None: if hasattr(model, "config"): ignore_keys = getattr(model.config, "keys_to_ignore_at_inference", []) else: ignore_keys = [] prediction_context_manager = amp.autocast("cuda") if self._peft_has_been_casted_to_bf16 else nullcontext() with torch.no_grad(), prediction_context_manager: loss, metrics = self.get_batch_loss_metrics(model, inputs, train_eval="eval") # force log the metrics self.store_metrics(metrics, train_eval="eval") if prediction_loss_only: return (loss.detach(), None, None) # logits for the chosen and rejected samples from model logits_dict = { "eval_logits/chosen": metrics["eval_logits/chosen"], "eval_logits/rejected": metrics["eval_logits/rejected"], } logits = tuple(v.unsqueeze(dim=0) for k, v in logits_dict.items() if k not in ignore_keys) logits = torch.stack(logits).mean(axis=1).to(self.accelerator.device) labels = torch.zeros(logits.shape[0], device=self.accelerator.device) return (loss.detach(), logits, labels) def store_metrics(self, metrics: dict[str, float], train_eval: Literal["train", "eval"] = "train") -> None: for key, value in metrics.items(): self._stored_metrics[train_eval][key].append(value) def evaluation_loop( self, dataloader: DataLoader, description: str, prediction_loss_only: Optional[bool] = None, ignore_keys: Optional[list[str]] = None, metric_key_prefix: str = "eval", ) -> EvalLoopOutput: """ Overriding built-in evaluation loop to store metrics for each batch. Prediction/evaluation loop, shared by `Trainer.evaluate()` and `Trainer.predict()`. Works both with or without labels. 
""" # Sample and save to game log if requested (for one batch to save time) if self.generate_during_eval: # Generate random indices within the range of the total number of samples num_samples = len(dataloader.dataset) random_indices = random.sample(range(num_samples), k=self.args.eval_batch_size) # Use dataloader.dataset.select to get the random batch without iterating over the DataLoader random_batch_dataset = dataloader.dataset.select(random_indices) random_batch = self.data_collator(random_batch_dataset) random_batch = self._prepare_inputs(random_batch) policy_output_decoded = self.generate_from_model(self.model, random_batch) table = pd.DataFrame( columns=["Prompt", "Policy"], data=[ [prompt, pol[len(prompt) :]] for prompt, pol in zip(random_batch["prompt"], policy_output_decoded) ], ) if "wandb" in self.args.report_to: wandb.log({"game_log": wandb.Table(data=table)}) if "comet_ml" in self.args.report_to: log_table_to_comet_experiment( name="game_log.csv", table=table, ) # Base evaluation initial_output = super().evaluation_loop( dataloader, description, prediction_loss_only, ignore_keys, metric_key_prefix ) return initial_output def log(self, logs: dict[str, float], start_time: Optional[float] = None) -> None: """ Log `logs` on the various objects watching training, including stored metrics. Args: logs (`dict[str, float]`): The values to log. start_time (`float` or `None`, *optional*, defaults to `None`): Start time of the training. """ # logs either has 'loss' or 'eval_loss' train_eval = "train" if "loss" in logs else "eval" # Add averaged stored metrics to logs for key, metrics in self._stored_metrics[train_eval].items(): logs[key] = torch.tensor(metrics).mean().item() del self._stored_metrics[train_eval] if version.parse(transformers.__version__) >= version.parse("4.47.0.dev0"): return super().log(logs, start_time) else: # transformers<=4.46 return super().log(logs) def _shift_right(self, input_ids): if self.decoder_start_token_id is None: raise ValueError( "model.config.decoder_start_token_id has to be defined. It is usually set to the pad_token_id." ) # shift inputs to the right if is_torch_fx_proxy(input_ids): # Item assignment is not supported natively for proxies. shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), self.decoder_start_token_id) shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1) else: shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[..., 1:] = input_ids[..., :-1].clone() shifted_input_ids[..., 0] = self.decoder_start_token_id if self.pad_token_id is None: raise ValueError("model.config.pad_token_id has to be defined.") # replace possible -100 values in labels by `pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids == -100, self.pad_token_id) return shifted_input_ids def create_model_card( self, model_name: Optional[str] = None, dataset_name: Optional[str] = None, tags: Union[str, list[str], None] = None, ): """ Creates a draft of a model card using the information available to the `Trainer`. Args: model_name (`str` or `None`, *optional*, defaults to `None`): Name of the model. dataset_name (`str` or `None`, *optional*, defaults to `None`): Name of the dataset used for training. tags (`str`, `list[str]` or `None`, *optional*, defaults to `None`): Tags to be associated with the model card. 
""" if not self.is_world_process_zero(): return if hasattr(self.model.config, "_name_or_path") and not os.path.isdir(self.model.config._name_or_path): base_model = self.model.config._name_or_path else: base_model = None tags = tags or [] if isinstance(tags, str): tags = [tags] if hasattr(self.model.config, "unsloth_version"): tags.append("unsloth") citation = textwrap.dedent("""\ @inproceedings{xu2024contrastive, title = {{Contrastive Preference Optimization: Pushing the Boundaries of LLM Performance in Machine Translation}}, author = {Haoran Xu and Amr Sharaf and Yunmo Chen and Weiting Tan and Lingfeng Shen and Benjamin Van Durme and Kenton Murray and Young Jin Kim}, year = 2024, booktitle = {Forty-first International Conference on Machine Learning, {ICML} 2024, Vienna, Austria, July 21-27, 2024}, publisher = {OpenReview.net}, url = {https://openreview.net/forum?id=51iwkioZpn} }""") model_card = generate_model_card( base_model=base_model, model_name=model_name, hub_model_id=self.hub_model_id, dataset_name=dataset_name, tags=tags, wandb_url=wandb.run.get_url() if is_wandb_available() and wandb.run is not None else None, comet_url=get_comet_experiment_url(), trainer_name="CPO", trainer_citation=citation, paper_title="Contrastive Preference Optimization: Pushing the Boundaries of LLM Performance in Machine Translation", paper_id="2401.08417", ) model_card.save(os.path.join(self.args.output_dir, "README.md"))
trl/trl/trainer/cpo_trainer.py/0
{ "file_path": "trl/trl/trainer/cpo_trainer.py", "repo_id": "trl", "token_count": 22633 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass, field from typing import Optional from transformers import TrainingArguments @dataclass class OnlineDPOConfig(TrainingArguments): r""" Configuration class for the [`OnlineDPOTrainer`]. Using [`~transformers.HfArgumentParser`] we can turn this class into [argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the command line. Parameters: learning_rate (`float`, *optional*, defaults to `5e-7`): Initial learning rate for [`AdamW`] optimizer. The default value replaces that of [`~transformers.TrainingArguments`]. reward_model_path (`str` or `None`, *optional*, defaults to `None`): Path to the reward model. Either `judge` or `reward_model_path` must be set, but not both. judge (`str` or `None`, *optional*, defaults to `None`): Name of the judge to use. Either `judge` or `reward_model_path` must be set, but not both. max_new_tokens (`int`, *optional*, defaults to `64`): Maximum number of tokens to generate per completion. max_length (`int`, *optional*, defaults to `256`): Maximum total length of the sequence (prompt + completion) used to compute log probabilities. If the sequence exceeds this limit, the leftmost tokens will be truncated to preserve as much of the completion as possible. temperature (`float`, *optional*, defaults to `0.9`): Temperature for sampling. The higher the temperature, the more random the completions. missing_eos_penalty (`float` or `None`, *optional*, defaults to `None`): Penalty applied to the score when the model fails to generate an EOS token. This is useful to encourage to generate completions shorter than the maximum length (`max_new_tokens`). The penalty must be a positive value. beta (`float` or `list[float]`, *optional*, defaults to `0.1`): Parameter controlling the deviation from the reference model. Higher β means less deviation from the reference model. For the IPO loss (`loss_type="ipo"`), β is the regularization parameter denoted by τ in the [paper](https://huggingface.co/papers/2310.12036). If a list of floats is provided then the β is selected for each new epoch and the last β is used for the rest of the epochs. loss_type (`str`, *optional*, defaults to `"sigmoid"`): Type of loss to use. Possible values are: - `"sigmoid"`: sigmoid loss from the original [DPO](https://huggingface.co/papers/2305.18290) paper. - `"ipo"`: IPO loss from the [IPO](https://huggingface.co/papers/2310.12036) paper. dataset_num_proc (`int` or `None`, *optional*, defaults to `None`): Number of processes to use for processing the dataset. disable_dropout (`bool`, *optional*, defaults to `True`): Whether to disable dropout in the model and reference model. use_vllm (`bool`, *optional*, defaults to `False`): Whether to use vLLM for generating completions. Requires vLLM to be installed (`pip install vllm`). 
ds3_gather_for_generation (`bool`, *optional*, defaults to `True`): This setting applies to DeepSpeed ZeRO-3. If enabled, the policy model weights are gathered for generation, improving generation speed. However, disabling this option allows training models that exceed the VRAM capacity of a single GPU, albeit at the cost of slower generation. """ learning_rate: float = field( default=5e-7, metadata={ "help": "Initial learning rate for `AdamW` optimizer. The default value replaces that of " "transformers.TrainingArguments." }, ) reward_model_path: Optional[str] = field( default=None, metadata={ "help": "Path to the reward model. Either `judge` or `reward_model_path` must be set, but not both." }, ) judge: Optional[str] = field( default=None, metadata={ "help": "Name of the judge to use. Either `judge` or `reward_model_path` must be set, but not both." }, ) max_new_tokens: int = field( default=64, metadata={"help": "Maximum number of tokens to generate per completion."}, ) max_length: int = field( default=512, metadata={ "help": "Maximum total length of the sequence (prompt + completion) used to compute log probabilities. If " "the sequence exceeds this limit, the leftmost tokens will be truncated to preserve as much of the " "completion as possible." }, ) temperature: float = field( default=0.9, metadata={"help": "Temperature for sampling. The higher the temperature, the more random the completions."}, ) missing_eos_penalty: Optional[float] = field( default=None, metadata={ "help": "Penalty applied to the score when the model fails to generate an EOS token. This is useful to " "encourage to generate completions shorter than the maximum length (`max_new_tokens`). The penalty must be " "a positive value." }, ) beta: list[float] = field( default_factory=lambda: [0.1], metadata={ "help": "Parameter controlling the deviation from the reference model. Higher β means less deviation from " "the reference model. For the IPO loss (`loss_type='ipo'`), β is the regularization parameter denoted by " "τ in the [paper](https://huggingface.co/papers/2310.12036). If a list of floats is provided then the β " "is selected for each new epoch and the last β is used for the rest of the epochs." }, ) loss_type: str = field( default="sigmoid", metadata={ "help": "Type of loss to use.", "choices": ["sigmoid", "ipo"], }, ) dataset_num_proc: Optional[int] = field( default=None, metadata={"help": "Number of processes to use for processing the dataset."}, ) disable_dropout: bool = field( default=True, metadata={"help": "Whether to disable dropout in the model."}, ) use_vllm: bool = field( default=False, metadata={ "help": "Whether to use vLLM for generating completions. Requires vLLM to be installed " "(`pip install vllm`)." }, ) ds3_gather_for_generation: bool = field( default=True, metadata={ "help": "This setting applies to DeepSpeed ZeRO-3. If enabled, the policy model weights are gathered for " "generation, improving generation speed. However, disabling this option allows training models that " "exceed the VRAM capacity of a single GPU, albeit at the cost of slower generation." }, ) def __post_init__(self): super().__post_init__() if hasattr(self.beta, "__len__") and len(self.beta) == 1: self.beta = self.beta[0]
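# ---------------------------------------------------------------------------
# Editor's sketch, not part of the upstream file: a minimal instantiation showing how
# `__post_init__` collapses a single-element `beta` list to a float. The output
# directory is a placeholder and the other values simply restate the defaults above.
# ---------------------------------------------------------------------------
def _example_online_dpo_config() -> "OnlineDPOConfig":
    config = OnlineDPOConfig(
        output_dir="./online-dpo-example",  # placeholder path
        beta=[0.1],  # a one-element list is collapsed to a float in __post_init__
        max_new_tokens=64,
        temperature=0.9,
    )
    assert config.beta == 0.1
    return config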
trl/trl/trainer/online_dpo_config.py/0
{ "file_path": "trl/trl/trainer/online_dpo_config.py", "repo_id": "trl", "token_count": 2921 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import textwrap from typing import Any, Callable, Optional, Union import jinja2 import torch import torch.nn as nn import torch.nn.functional as F from datasets import Dataset, IterableDataset from transformers import ( BaseImageProcessor, FeatureExtractionMixin, PreTrainedModel, PreTrainedTokenizerBase, ProcessorMixin, TrainerCallback, is_apex_available, is_wandb_available, ) from transformers.trainer_utils import EvalPrediction from transformers.training_args import OptimizerNames from ..data_utils import is_conversational, maybe_apply_chat_template from ..models.utils import unwrap_model_for_generation from .judges import BasePairwiseJudge from .online_dpo_trainer import OnlineDPOTrainer from .utils import ( SIMPLE_CHAT_TEMPLATE, empty_cache, generate_model_card, get_comet_experiment_url, get_reward, selective_log_softmax, truncate_right, ) from .xpo_config import XPOConfig if is_apex_available(): from apex import amp if is_wandb_available(): import wandb class XPOTrainer(OnlineDPOTrainer): r""" Initialize XPOTrainer as a subclass of [`OnlineDPOTrainer`]. Args: model (`transformers.PreTrainedModel`): The model to train, preferably an `AutoModelForCausalLM`. ref_model (`PreTrainedModelWrapper`): Hugging Face transformer model with a causal language modelling head. Used for implicit reward computation and loss. If no reference model is provided, the trainer will create a reference model with the same architecture as the model to be optimized. reward_model (`transformers.PreTrainedModel`): The reward model to score completions with, preferably an `AutoModelForSequenceClassification`. judge (`BasePairwiseJudge`): The judge to use for pairwise comparison of model completions. args (`XPOConfig`): The XPO config arguments to use for training. data_collator (`transformers.DataCollator`): The data collator to use for training. If None is specified, the default data collator (`DPODataCollatorWithPadding`) will be used which will pad the sequences to the maximum length of the sequences in the batch, given a dataset of paired sequences. train_dataset (`datasets.Dataset`): The dataset to use for training. eval_dataset (`datasets.Dataset`): The dataset to use for evaluation. processing_class (`PreTrainedTokenizerBase` or `BaseImageProcessor` or `FeatureExtractionMixin` or `ProcessorMixin`, *optional*): Processing class used to process the data. If provided, will be used to automatically process the inputs for the model, and it will be saved along the model to make it easier to rerun an interrupted training or reuse the fine-tuned model. peft_config (`dict`): The peft config to use for training. compute_metrics (`Callable[[EvalPrediction], dict]`, *optional*): The function to use to compute the metrics. Must take a `EvalPrediction` and return a dictionary string to metric values. callbacks (`list[transformers.TrainerCallback]`): The callbacks to use for training. 
optimizers (`tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`): The optimizer and scheduler to use for training. preprocess_logits_for_metrics (`Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`): The function to use to preprocess the logits before computing the metrics. """ _tag_names = ["trl", "xpo"] def __init__( self, model: Union[PreTrainedModel, nn.Module] = None, ref_model: Union[PreTrainedModel, nn.Module] = None, reward_model: Optional[nn.Module] = None, judge: Optional[BasePairwiseJudge] = None, args: Optional[XPOConfig] = None, data_collator: Optional[Callable] = None, train_dataset: Optional[Union[Dataset, IterableDataset]] = None, eval_dataset: Optional[Union[Dataset, dict[str, Dataset]]] = None, processing_class: Optional[ Union[PreTrainedTokenizerBase, BaseImageProcessor, FeatureExtractionMixin, ProcessorMixin] ] = None, peft_config: Optional[dict] = None, compute_metrics: Optional[Callable[[EvalPrediction], dict]] = None, callbacks: Optional[list[TrainerCallback]] = None, optimizers: tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None), preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None, ) -> None: super().__init__( model=model, ref_model=ref_model, judge=judge, reward_model=reward_model, args=args, data_collator=data_collator, train_dataset=train_dataset, eval_dataset=eval_dataset, processing_class=processing_class, reward_processing_class=processing_class, # for now, XPOTrainer can't use any reward model peft_config=peft_config, compute_metrics=compute_metrics, callbacks=callbacks, optimizers=optimizers, preprocess_logits_for_metrics=preprocess_logits_for_metrics, ) self._alpha = self.args.alpha # Overwrite the stats dictionary to include XPO specific statistics self.stats = { # Remove "non_score_reward", "rlhf_reward", "scores" # Add "loss/dpo", "loss/xpo" "loss/dpo": [], "loss/xpo": [], "objective/kl": [], "objective/entropy": [], "rewards/chosen": [], "rewards/rejected": [], "rewards/accuracies": [], "rewards/margins": [], "logps/chosen": [], "logps/rejected": [], # Replace "contain_eos_token" by "model_contain_eos_token" and "ref_contain_eos_token" "val/model_contain_eos_token": [], "val/ref_contain_eos_token": [], "alpha": [], "beta": [], } if self.reward_model is not None: # Replace "scores" by "model_scores" and "ref_scores" self.stats["objective/model_scores"] = [] self.stats["objective/ref_scores"] = [] self.stats["objective/scores_margin"] = [] @property def alpha(self): if isinstance(self._alpha, list): epoch = self.state.epoch return self._alpha[epoch] if epoch < len(self._alpha) else self._alpha[-1] else: return self._alpha def _generate_completions(self, prompts, model): with unwrap_model_for_generation(model, self.accelerator) as unwrapped_model: model_output = unwrapped_model.generate( input_ids=prompts["input_ids"], attention_mask=prompts["attention_mask"], generation_config=self.generation_config, ) ref_model = model if self.ref_model is None else self.ref_model with torch.no_grad(), unwrap_model_for_generation(ref_model, self.accelerator) as unwrapped_ref_model: ref_output = unwrapped_ref_model.generate( input_ids=prompts["input_ids"], attention_mask=prompts["attention_mask"], generation_config=self.generation_config, ) return model_output, ref_output def _process_completions(self, model_output, ref_output, prompts): context_length = prompts["input_ids"].shape[1] # Process model completions model_completion_ids = model_output[:, context_length:] 
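        # Editor's note (added comment, based on the call below): `truncate_right` is expected to cut
        # each sampled completion at its first EOS token, pad what follows, and return both the
        # truncated ids and the matching completion attention mask.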
model_completion_ids, model_completion_mask = truncate_right( model_completion_ids, self.processing_class.eos_token_id, self.processing_class.pad_token_id ) model_data = { "input_ids": torch.cat((prompts["input_ids"], model_completion_ids), dim=1), "attention_mask": torch.cat((prompts["attention_mask"], model_completion_mask), dim=1), "raw": prompts["raw"], } # Process reference model completions ref_completion_ids = ref_output[:, context_length:] ref_completion_ids, ref_completion_mask = truncate_right( ref_completion_ids, self.processing_class.eos_token_id, self.processing_class.pad_token_id ) ref_data = { "input_ids": torch.cat((prompts["input_ids"], ref_completion_ids), dim=1), "attention_mask": torch.cat((prompts["attention_mask"], ref_completion_mask), dim=1), "raw": prompts["raw"], } return model_data, ref_data def _compute_rewards(self, model_data, ref_data, context_length): with torch.no_grad(): _, model_scores, _ = get_reward( self.reward_model, model_data["input_ids"], self.processing_class.pad_token_id, context_length ) _, ref_scores, _ = get_reward( self.reward_model, ref_data["input_ids"], self.processing_class.pad_token_id, context_length ) # Apply EOS penalty if needed if self.args.missing_eos_penalty is not None: model_contain_eos = torch.any(model_data["input_ids"] == self.processing_class.eos_token_id, dim=-1) ref_contain_eos = torch.any(ref_data["input_ids"] == self.processing_class.eos_token_id, dim=-1) model_scores[~model_contain_eos] -= self.args.missing_eos_penalty ref_scores[~ref_contain_eos] -= self.args.missing_eos_penalty return model_scores, ref_scores def _compute_judge(self, model_data, ref_data, context_length): prompts = model_data["raw"] model_data_completions = self.processing_class.batch_decode( model_data["input_ids"][:, context_length:], skip_special_tokens=True ) model_data_completions = [completion.strip() for completion in model_data_completions] ref_data_completions = self.processing_class.batch_decode( ref_data["input_ids"][:, context_length:], skip_special_tokens=True ) ref_data_completions = [completion.strip() for completion in ref_data_completions] if is_conversational({"prompt": prompts[0]}): model_data_completions = [ [{"role": "assistant", "content": completion}] for completion in model_data_completions ] environment = jinja2.Environment() template = environment.from_string(SIMPLE_CHAT_TEMPLATE) prompts = [template.render(messages=message) for message in prompts] model_data_completions = [template.render(messages=completion) for completion in model_data_completions] ref_data_completions = [ [{"role": "assistant", "content": completion}] for completion in ref_data_completions ] ref_data_completions = [template.render(messages=completion) for completion in ref_data_completions] ranks_of_first_completion = self.judge.judge( prompts, list(zip(model_data_completions, ref_data_completions)), ) # convert ranks to a True/False mask: # when rank == 0, it means the first completion is the best # when rank == 1, it means the second completion is the best return torch.tensor([rank == 0 for rank in ranks_of_first_completion], device=model_data["input_ids"].device) def _compute_logprobs(self, model, model_data, ref_data, context_length): def compute_logprobs_for_data(m, data): output = m(data["input_ids"], attention_mask=data["attention_mask"]) logits = output.logits[:, context_length - 1 : -1] token_logprobs = selective_log_softmax(logits, data["input_ids"][:, context_length:]) return token_logprobs # Compute logprobs for model completions 
model_logprobs_model_data = compute_logprobs_for_data(model, model_data) # Compute logprobs for model on reference completions (for XPO loss) model_logprobs_ref_data = compute_logprobs_for_data(model, ref_data) # Compute logprobs for reference model completions with torch.no_grad(): if self.ref_model is None: with model.disable_adapter(): ref_logprobs_model_data = compute_logprobs_for_data(model, model_data) ref_logprobs_ref_data = compute_logprobs_for_data(model, ref_data) else: ref_logprobs_model_data = compute_logprobs_for_data(self.ref_model, model_data) ref_logprobs_ref_data = compute_logprobs_for_data(self.ref_model, ref_data) # Mask padding tokens model_padding_mask = model_data["attention_mask"][:, context_length:] == 0 ref_padding_mask = ref_data["attention_mask"][:, context_length:] == 0 model_logprobs_model_data = model_logprobs_model_data.masked_fill(model_padding_mask, 0.0) model_logprobs_ref_data = model_logprobs_ref_data.masked_fill(ref_padding_mask, 0.0) ref_logprobs_ref_data = ref_logprobs_ref_data.masked_fill(ref_padding_mask, 0.0) ref_logprobs_model_data = ref_logprobs_model_data.masked_fill(model_padding_mask, 0.0) return model_logprobs_model_data, model_logprobs_ref_data, ref_logprobs_ref_data, ref_logprobs_model_data def _compute_losses( self, model_logprobs_model_data, model_logprobs_ref_data, ref_logprobs_ref_data, ref_logprobs_model_data, chosen_mask, ): # Compute log probs model_logprobs_model_data_sum = model_logprobs_model_data.sum(1) model_logprobs_ref_data_sum = model_logprobs_ref_data.sum(1) ref_logprobs_ref_data_sum = ref_logprobs_ref_data.sum(1) ref_logprobs_model_data_sum = ref_logprobs_model_data.sum(1) chosen_model_logprobs = torch.where(chosen_mask, model_logprobs_model_data_sum, model_logprobs_ref_data_sum) chosen_ref_logprobs = torch.where(chosen_mask, ref_logprobs_model_data_sum, ref_logprobs_ref_data_sum) chosen_log_ratios = chosen_model_logprobs - chosen_ref_logprobs rejected_model_logprobs = torch.where(~chosen_mask, model_logprobs_model_data_sum, model_logprobs_ref_data_sum) rejected_ref_logprobs = torch.where(~chosen_mask, ref_logprobs_model_data_sum, ref_logprobs_ref_data_sum) rejected_log_ratios = rejected_model_logprobs - rejected_ref_logprobs # Compute logits as the difference between chosen and rejected log ratios logits = chosen_log_ratios - rejected_log_ratios if self.args.loss_type == "sigmoid": dpo_losses = -F.logsigmoid(self.beta * logits) elif self.args.loss_type == "ipo": dpo_losses = (logits - 1 / (2 * self.beta)) ** 2 else: raise NotImplementedError(f"invalid loss type {self.args.loss_type}") # Compute XPO specific loss xpo_losses = self.alpha * model_logprobs_ref_data_sum # Total loss loss = (dpo_losses + xpo_losses).mean() return loss, dpo_losses, xpo_losses def _log_statistics( self, model_data, ref_data, model_logprobs_model_data, model_logprobs_ref_data, ref_logprobs_ref_data, ref_logprobs_model_data, chosen_mask, dpo_losses, xpo_losses, context_length, model_scores=None, ref_scores=None, ): # Helper function to gather and compute mean def gather_mean(tensor): return self.accelerator.gather_for_metrics(tensor).mean().item() # Log losses self.stats["loss/dpo"].append(gather_mean(dpo_losses)) self.stats["loss/xpo"].append(gather_mean(xpo_losses)) # Log scores if self.reward_model is not None: self.stats["objective/model_scores"].append(gather_mean(model_scores)) self.stats["objective/ref_scores"].append(gather_mean(ref_scores)) self.stats["objective/scores_margin"].append(gather_mean(model_scores - ref_scores)) # Log logprobs 
model_logprobs_model_data_sum = model_logprobs_model_data.sum(1) model_logprobs_ref_data_sum = model_logprobs_ref_data.sum(1) ref_logprobs_ref_data_sum = ref_logprobs_ref_data.sum(1) ref_logprobs_model_data_sum = ref_logprobs_model_data.sum(1) chosen_model_logprobs = torch.where(chosen_mask, model_logprobs_model_data_sum, model_logprobs_ref_data_sum) chosen_ref_logprobs = torch.where(chosen_mask, ref_logprobs_model_data_sum, ref_logprobs_ref_data_sum) chosen_log_ratios = chosen_model_logprobs - chosen_ref_logprobs rejected_model_logprobs = torch.where(~chosen_mask, model_logprobs_model_data_sum, model_logprobs_ref_data_sum) rejected_ref_logprobs = torch.where(~chosen_mask, ref_logprobs_model_data_sum, ref_logprobs_ref_data_sum) rejected_log_ratios = rejected_model_logprobs - rejected_ref_logprobs self.stats["logps/chosen"].append(gather_mean(chosen_model_logprobs.mean() + chosen_ref_logprobs.mean())) self.stats["logps/rejected"].append(gather_mean(rejected_model_logprobs.mean() + rejected_ref_logprobs.mean())) # Log rewards # Compute various statistics chosen_rewards = chosen_log_ratios * self.beta rejected_rewards = rejected_log_ratios * self.beta self.stats["rewards/chosen"].append(gather_mean(chosen_rewards.mean())) self.stats["rewards/rejected"].append(gather_mean(rejected_rewards.mean())) # Calculate KL divergence for model and ref data kl_model_data = model_logprobs_model_data - ref_logprobs_model_data kl_ref_data = model_logprobs_ref_data - ref_logprobs_ref_data mean_kl = (kl_model_data.sum(1) + kl_ref_data.sum(1)).mean() / 2 self.stats["objective/kl"].append(gather_mean(mean_kl)) # Calculate entropy for model and ref data entropy_model_data = -model_logprobs_model_data.sum(1) entropy_ref_data = -model_logprobs_ref_data.sum(1) mean_entropy = (entropy_model_data.mean() + entropy_ref_data.mean()) / 2 self.stats["objective/entropy"].append(gather_mean(mean_entropy)) # Calculate margins margin = chosen_rewards - rejected_rewards self.stats["rewards/margins"].append(gather_mean(margin.mean())) # Calculate accuracy accuracy = (margin > 0).float() self.stats["rewards/accuracies"].append(gather_mean(accuracy.mean())) # Log EOS token statistics model_eos = (model_data["input_ids"][:, context_length:] == self.processing_class.eos_token_id).any(dim=1) ref_eos = (ref_data["input_ids"][:, context_length:] == self.processing_class.eos_token_id).any(dim=1) self.stats["val/model_contain_eos_token"].append(gather_mean(model_eos.float())) self.stats["val/ref_contain_eos_token"].append(gather_mean(ref_eos.float())) # Log alpha and beta self.stats["alpha"].append(self.alpha) self.stats["beta"].append(self.beta) def training_step( self, model: nn.Module, inputs: dict[str, Union[torch.Tensor, Any]], num_items_in_batch: Optional[int] = None ) -> torch.Tensor: model.train() # Apply chat template and tokenize the input batch_size = len(next(iter(inputs.values()))) prompts = inputs["prompt"] inputs = [{k: v[i] for k, v in inputs.items()} for i in range(batch_size)] inputs = [maybe_apply_chat_template(x, self.processing_class) for x in inputs] inputs = [self.tokenize_row(x, self.model.config.is_encoder_decoder, self.processing_class) for x in inputs] inputs = self.data_collator(inputs) # need the prompt_ only inputs = self._prepare_inputs(inputs) context_length = inputs["prompt_input_ids"].shape[1] prompts = { "input_ids": inputs["prompt_input_ids"], "attention_mask": inputs["prompt_attention_mask"], "raw": prompts, } del inputs # Sample completions from both the model and the reference model model_output, 
ref_output = self._generate_completions(prompts, model) # Process model completions model_data, ref_data = self._process_completions(model_output, ref_output, prompts) # Compute rewards if self.reward_model is not None: model_scores, ref_scores = self._compute_rewards(model_data, ref_data, context_length) chosen_mask = model_scores >= ref_scores else: model_scores, ref_scores = None, None chosen_mask = self._compute_judge(model_data, ref_data, context_length) # Compute logprobs model_logprobs_model_data, model_logprobs_ref_data, ref_logprobs_ref_data, ref_logprobs_model_data = ( self._compute_logprobs(model, model_data, ref_data, context_length) ) # Compute loss loss, dpo_losses, xpo_losses = self._compute_losses( model_logprobs_model_data, model_logprobs_ref_data, ref_logprobs_ref_data, ref_logprobs_model_data, chosen_mask, ) # Log everything self._log_statistics( model_data, ref_data, model_logprobs_model_data.detach(), model_logprobs_ref_data.detach(), ref_logprobs_ref_data, ref_logprobs_model_data, chosen_mask, dpo_losses.detach(), xpo_losses.detach(), context_length, model_scores, ref_scores, ) if ( self.args.torch_empty_cache_steps is not None and self.state.global_step % self.args.torch_empty_cache_steps == 0 ): empty_cache() kwargs = {} # For LOMO optimizers you need to explicitly use the learning rate if self.args.optim in [OptimizerNames.LOMO, OptimizerNames.ADALOMO]: kwargs["learning_rate"] = self._get_learning_rate() if self.args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel training if self.use_apex: with amp.scale_loss(loss, self.optimizer) as scaled_loss: scaled_loss.backward() else: self.accelerator.backward(loss, **kwargs) return loss.detach() / self.args.gradient_accumulation_steps def create_model_card( self, model_name: Optional[str] = None, dataset_name: Optional[str] = None, tags: Union[str, list[str], None] = None, ): """ Creates a draft of a model card using the information available to the `Trainer`. Args: model_name (`str` or `None`, *optional*, defaults to `None`): Name of the model. dataset_name (`str` or `None`, *optional*, defaults to `None`): Name of the dataset used for training. tags (`str`, `list[str]` or `None`, *optional*, defaults to `None`): Tags to be associated with the model card. """ if not self.is_world_process_zero(): return if hasattr(self.model.config, "_name_or_path") and not os.path.isdir(self.model.config._name_or_path): base_model = self.model.config._name_or_path else: base_model = None tags = tags or [] if isinstance(tags, str): tags = [tags] if hasattr(self.model.config, "unsloth_version"): tags.append("unsloth") citation = textwrap.dedent("""\ @article{jung2024binary, title = {{Exploratory Preference Optimization: Harnessing Implicit Q*-Approximation for Sample-Efficient RLHF}}, author = {Tengyang Xie and Dylan J. Foster and Akshay Krishnamurthy and Corby Rosset and Ahmed Awadallah and Alexander Rakhlin}, year = 2024, eprint = {arXiv:2405.21046} }""") model_card = generate_model_card( base_model=base_model, model_name=model_name, hub_model_id=self.hub_model_id, dataset_name=dataset_name, tags=tags, wandb_url=wandb.run.get_url() if is_wandb_available() and wandb.run is not None else None, comet_url=get_comet_experiment_url(), trainer_name="XPO", trainer_citation=citation, paper_title="Exploratory Preference Optimization: Harnessing Implicit Q*-Approximation for Sample-Efficient RLHF", paper_id="2405.21046", ) model_card.save(os.path.join(self.args.output_dir, "README.md"))
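
# Example usage of this trainer (a sketch only, kept as comments so it is not
# executed as part of this module; the model, judge and dataset below are
# placeholders in the style of typical TRL examples):
#
#     from datasets import load_dataset
#     from transformers import AutoModelForCausalLM, AutoTokenizer
#     from trl import PairRMJudge, XPOConfig, XPOTrainer
#
#     model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2-0.5B-Instruct")
#     tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2-0.5B-Instruct")
#     trainer = XPOTrainer(
#         model=model,
#         judge=PairRMJudge(),
#         args=XPOConfig(output_dir="Qwen2-0.5B-XPO"),
#         processing_class=tokenizer,
#         train_dataset=load_dataset("trl-lib/ultrafeedback-prompt", split="train"),
#     )
#     trainer.train()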
trl/trl/trainer/xpo_trainer.py/0
{ "file_path": "trl/trl/trainer/xpo_trainer.py", "repo_id": "trl", "token_count": 11292 }
# Big model inference benchmarks

Running inference with Accelerate on big models.

## Setup

These benchmarks use the `transformers` library:

```bash
pip install transformers
```

To reproduce or test a new setup, run

```bash
python inference_acc.py model_name
```

This script supports `gpt-j-6b`, `gpt-neox`, `opt` (30B version) and `T0pp` out of the box, but you can specify any valid checkpoint for `model_name`.

To force a different `torch_dtype` than the one in the config, pass `--torch_dtype xxx`.

If you get an error linked to disk offload, you need to add the option `--disk-offload`.

## Results

On a setup with two Titan RTX GPUs (24GB of RAM each) and 32GB of CPU RAM, we get the following benchmarks (T0pp does not run in float16, which is why it's not included).

| Model | Model load time | Generation time | dtype | GPU 0 use | GPU 1 use | CPU use | Disk offload |
|:-----:|:---------------:|:---------------:|:-----:|:---------:|:---------:|:-------:|:------------:|
| GPT-J-6B | 8.7s | 0.05s per token | float16 | 11.7GB | 0GB | 0GB | no |
| GPT-J-6B | 12.4s | 0.06s per token | float32 | 21.9GB | 1.5GB | 0GB | no |
| GPT-Neo-X-20B | 30.9s | 0.08s per token | float16 | 21.5GB | 18GB | 0GB | no |
| GPT-Neo-X-20B | 78.2s | 10.72s per token | float32 | 20.3GB | 22.7GB | 24.4GB | yes |
| T0pp (11B) | 29.4s | 0.05s per token | float32 | 21.1GB | 21.3GB | 0GB | no |
| OPT-30B | 34.5s | 2.37s per token | float16 | 20.7GB | 22.3GB | 14.1GB | no |
| OPT-30B | 112.3s | 33.9s per token | float32 | 20.2GB | 21.2GB | 23.5GB | yes |

Note on the results:
- using two GPUs instead of one does not slow down generation
- using CPU offload slows down generation a bit (see OPT-30B)
- using disk offload slows down generation a lot (prefetching still needs to be implemented)

You will also note that Accelerate does not use any more GPU and CPU RAM than necessary:
- peak GPU memory is exactly the size of the model put on a given GPU
- peak CPU memory is either the size of the biggest checkpoint shard or the part of the model offloaded on CPU, whichever is bigger.
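For example, to reproduce the GPT-J-6B float16 row above (assuming the script name and flags described in the Setup section):

```bash
python inference_acc.py gpt-j-6b --torch_dtype float16
```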
accelerate/benchmarks/big_model_inference/README.md/0
{ "file_path": "accelerate/benchmarks/big_model_inference/README.md", "repo_id": "accelerate", "token_count": 702 }
# Builds CPU-only Docker image of PyTorch # Uses multi-staged approach to reduce size # Stage 1 FROM python:3.9-slim as compile-image ARG DEBIAN_FRONTEND=noninteractive RUN apt update RUN apt-get install -y --no-install-recommends \ build-essential \ git \ gcc # Setup virtual environment for Docker ENV VIRTUAL_ENV=/opt/venv RUN python3 -m venv ${VIRTUAL_ENV} # Make sure we use the virtualenv ENV PATH="${VIRTUAL_ENV}/bin:$PATH" WORKDIR /workspace # Install specific CPU torch wheel to save on space RUN python3 -m pip install --upgrade --no-cache-dir pip RUN python3 -m pip install --no-cache-dir \ jupyter \ git+https://github.com/huggingface/accelerate#egg=accelerate[testing,test_trackers] \ --extra-index-url https://download.pytorch.org/whl/cpu # Stage 2 FROM python:3.9-slim AS build-image COPY --from=compile-image /opt/venv /opt/venv RUN useradd -ms /bin/bash user USER user # Make sure we use the virtualenv ENV PATH="/opt/venv/bin:$PATH" CMD ["/bin/bash"]
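
# Example build/run (a sketch; run from the repository root, the image tag is arbitrary):
#   docker build -t accelerate-cpu -f docker/accelerate-cpu/Dockerfile .
#   docker run -it --rm accelerate-cpu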
accelerate/docker/accelerate-cpu/Dockerfile/0
{ "file_path": "accelerate/docker/accelerate-cpu/Dockerfile", "repo_id": "accelerate", "token_count": 380 }
<!--Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->

# Start Here!

Please use the interactive tool below to help you get started with learning about a particular
feature of Accelerate and how to utilize it! It will provide you with a code diff, an explanation
of what is going on, and some useful links to explore more within the documentation!

Most code examples start from the following Python code before integrating Accelerate in some way:

```python
for batch in dataloader:
    optimizer.zero_grad()
    inputs, targets = batch
    inputs = inputs.to(device)
    targets = targets.to(device)
    outputs = model(inputs)
    loss = loss_function(outputs, targets)
    loss.backward()
    optimizer.step()
    scheduler.step()
```

<div class="block dark:hidden">
    <iframe
        src="https://hf-accelerate-accelerate-examples.hf.space?__theme=light"
        width="850"
        height="1600"
    ></iframe>
</div>
<div class="hidden dark:block">
    <iframe
        src="https://hf-accelerate-accelerate-examples.hf.space?__theme=dark"
        width="850"
        height="1600"
    ></iframe>
</div>
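For reference, after integrating Accelerate, the basic version of the loop above typically ends up looking like the following (a minimal sketch; the exact diff the tool shows depends on the feature you select):

```python
from accelerate import Accelerator

accelerator = Accelerator()
model, optimizer, dataloader, scheduler = accelerator.prepare(model, optimizer, dataloader, scheduler)

for batch in dataloader:
    optimizer.zero_grad()
    inputs, targets = batch  # already on the right device thanks to the prepared dataloader
    outputs = model(inputs)
    loss = loss_function(outputs, targets)
    accelerator.backward(loss)
    optimizer.step()
    scheduler.step()
```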
accelerate/docs/source/usage_guides/explore.md/0
{ "file_path": "accelerate/docs/source/usage_guides/explore.md", "repo_id": "accelerate", "token_count": 566 }
# Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import re import numpy as np import PIL import torch from timm import create_model from torch.optim.lr_scheduler import OneCycleLR from torch.utils.data import DataLoader, Dataset from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor from accelerate import Accelerator, DataLoaderConfiguration ######################################################################## # This is a fully working simple example to use Accelerate # # This example trains a ResNet50 on the Oxford-IIT Pet Dataset # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## # Function to get the label from the filename def extract_label(fname): stem = fname.split(os.path.sep)[-1] return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0] class PetsDataset(Dataset): def __init__(self, file_names, image_transform=None, label_to_id=None): self.file_names = file_names self.image_transform = image_transform self.label_to_id = label_to_id def __len__(self): return len(self.file_names) def __getitem__(self, idx): fname = self.file_names[idx] raw_image = PIL.Image.open(fname) image = raw_image.convert("RGB") if self.image_transform is not None: image = self.image_transform(image) label = extract_label(fname) if self.label_to_id is not None: label = self.label_to_id[label] return {"image": image, "label": label} def training_function(config, args): # Initialize accelerator dataloader_config = DataLoaderConfiguration(use_stateful_dataloader=args.use_stateful_dataloader) if args.with_tracking: accelerator = Accelerator( cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir, dataloader_config=dataloader_config, ) else: accelerator = Accelerator( cpu=args.cpu, mixed_precision=args.mixed_precision, dataloader_config=dataloader_config ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lr = config["lr"] num_epochs = int(config["num_epochs"]) seed = int(config["seed"]) batch_size = int(config["batch_size"]) image_size = config["image_size"] if not isinstance(image_size, (list, tuple)): image_size = (image_size, image_size) # Parse out whether we are saving every epoch or after a certain number of batches if hasattr(args.checkpointing_steps, "isdigit"): if args.checkpointing_steps == "epoch": checkpointing_steps = args.checkpointing_steps elif args.checkpointing_steps.isdigit(): checkpointing_steps = int(args.checkpointing_steps) else: raise ValueError( f"Argument `checkpointing_steps` must be either a number or `epoch`. 
`{args.checkpointing_steps}` passed." ) else: checkpointing_steps = None # We need to initialize the trackers we use, and also store our configuration if args.with_tracking: run = os.path.split(__file__)[-1].split(".")[0] accelerator.init_trackers(run, config) # Grab all the image filenames file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(".jpg")] # Build the label correspondences all_labels = [extract_label(fname) for fname in file_names] id_to_label = list(set(all_labels)) id_to_label.sort() label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)} # Set the seed before splitting the data. np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) # Split our filenames between train and validation random_perm = np.random.permutation(len(file_names)) cut = int(0.8 * len(file_names)) train_split = random_perm[:cut] eval_split = random_perm[cut:] # For training we use a simple RandomResizedCrop train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()]) train_dataset = PetsDataset( [file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id ) # For evaluation, we use a deterministic Resize eval_tfm = Compose([Resize(image_size), ToTensor()]) eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id) # Instantiate dataloaders. train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4) eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4) # Instantiate the model (we build the model here so that the seed also control new weights initialization) model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id)) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). model = model.to(accelerator.device) # Freezing the base model for param in model.parameters(): param.requires_grad = False for param in model.get_classifier().parameters(): param.requires_grad = True # We normalize the batches of images to be a bit faster. mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None].to(accelerator.device) std = torch.tensor(model.default_cfg["std"])[None, :, None, None].to(accelerator.device) # Instantiate optimizer optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25) # Instantiate learning rate scheduler lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader)) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader, lr_scheduler ) # We need to keep track of how many total steps we have iterated over overall_step = 0 # We also need to keep track of the starting epoch so files are named properly starting_epoch = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "": accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}") accelerator.load_state(args.resume_from_checkpoint) path = os.path.basename(args.resume_from_checkpoint) else: # Get the most recent checkpoint dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()] dirs.sort(key=os.path.getctime) path = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last # Extract `epoch_{i}` or `step_{i}` training_difference = os.path.splitext(path)[0] if "epoch" in training_difference: starting_epoch = int(training_difference.replace("epoch_", "")) + 1 resume_step = None else: resume_step = int(training_difference.replace("step_", "")) starting_epoch = resume_step // len(train_dataloader) resume_step -= starting_epoch * len(train_dataloader) # Now we train the model for epoch in range(starting_epoch, num_epochs): model.train() if args.with_tracking: total_loss = 0 if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None: # We need to skip steps until we reach the resumed step active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step) overall_step += resume_step else: # After the first iteration though, we need to go back to the original dataloader active_dataloader = train_dataloader for batch in active_dataloader: # We could avoid this line since we set the accelerator with `device_placement=True`. batch = {k: v.to(accelerator.device) for k, v in batch.items()} inputs = (batch["image"] - mean) / std outputs = model(inputs) loss = torch.nn.functional.cross_entropy(outputs, batch["label"]) # We keep track of the loss at each epoch if args.with_tracking: total_loss += loss.detach().float() accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 if isinstance(checkpointing_steps, int): output_dir = f"step_{overall_step}" if overall_step % checkpointing_steps == 0: if args.output_dir is not None: output_dir = os.path.join(args.output_dir, output_dir) accelerator.save_state(output_dir) model.eval() accurate = 0 num_elems = 0 for step, batch in enumerate(eval_dataloader): # We could avoid this line since we set the accelerator with `device_placement=True`. batch = {k: v.to(accelerator.device) for k, v in batch.items()} inputs = (batch["image"] - mean) / std with torch.no_grad(): outputs = model(inputs) predictions = outputs.argmax(dim=-1) predictions, references = accelerator.gather_for_metrics((predictions, batch["label"])) accurate_preds = predictions == references num_elems += accurate_preds.shape[0] accurate += accurate_preds.long().sum() eval_metric = accurate.item() / num_elems # Use accelerator.print to print only on the main process. 
accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}") if args.with_tracking: accelerator.log( { "accuracy": 100 * eval_metric, "train_loss": total_loss.item() / len(train_dataloader), "epoch": epoch, }, step=overall_step, ) if checkpointing_steps == "epoch": output_dir = f"epoch_{epoch}" if args.output_dir is not None: output_dir = os.path.join(args.output_dir, output_dir) accelerator.save_state(output_dir) accelerator.end_training() def main(): parser = argparse.ArgumentParser(description="Simple example of training script.") parser.add_argument("--data_dir", required=True, help="The data folder on disk.") parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.") parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU.", ) parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.") parser.add_argument( "--checkpointing_steps", type=str, default=None, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", ) parser.add_argument( "--output_dir", type=str, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help="If the training should continue from a checkpoint folder.", ) parser.add_argument( "--use_stateful_dataloader", action="store_true", help="If the dataloader should be a resumable stateful dataloader.", ) parser.add_argument( "--with_tracking", action="store_true", help="Whether to load in all available experiment trackers from the environment and use them for logging.", ) parser.add_argument( "--project_dir", type=str, default="logs", help="Location on where to store experiment tracking logs` and relevent project information", ) args = parser.parse_args() config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224} training_function(config, args) if __name__ == "__main__": main()
accelerate/examples/complete_cv_example.py/0
{ "file_path": "accelerate/examples/complete_cv_example.py", "repo_id": "accelerate", "token_count": 5557 }
# Distributed inference examples

This folder contains a variety of tutorials for running distributed inference with the following strategy:

Load an entire model onto each GPU and send chunks of a batch through each GPU’s model copy at a time

## Installation

```bash
pip install accelerate torch
```

## Running code

You can use either `torchrun` or the recommended `accelerate launch` (without needing to run `accelerate config`) on each script:

```bash
accelerate launch --num_processes {NUM_GPUS} phi2.py
```

Or:

```bash
torchrun --nproc-per-node {NUM_GPUS} phi2.py
```
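All of these scripts follow the same basic pattern; here is a condensed sketch of the idea (the checkpoint and prompts are placeholders):

```python
import torch
from accelerate import PartialState
from transformers import AutoModelForCausalLM, AutoTokenizer

# Each process loads a full copy of the model onto its own GPU
state = PartialState()
checkpoint = "microsoft/phi-2"  # placeholder checkpoint
model = AutoModelForCausalLM.from_pretrained(checkpoint, torch_dtype=torch.float16).to(state.device)
tokenizer = AutoTokenizer.from_pretrained(checkpoint)

prompts = ["Hello!", "What is the capital of France?", "Write a haiku about GPUs.", "What is 2 + 2?"]

# Each process only sees (and generates for) its own chunk of the batch
with state.split_between_processes(prompts) as my_prompts:
    for prompt in my_prompts:
        inputs = tokenizer(prompt, return_tensors="pt").to(state.device)
        outputs = model.generate(**inputs, max_new_tokens=20)
        print(f"rank {state.process_index}: {tokenizer.decode(outputs[0], skip_special_tokens=True)}")
```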
accelerate/examples/inference/distributed/README.md/0
{ "file_path": "accelerate/examples/inference/distributed/README.md", "repo_id": "accelerate", "token_count": 175 }
#!/bin/bash -l

#SBATCH --job-name=multicpu
#SBATCH --nodes=2                   # number of Nodes
#SBATCH --ntasks-per-node=1         # number of MP tasks
#SBATCH --exclusive
#SBATCH --output=O-%x.%j
#SBATCH --error=E-%x.%j

#######################
### Set environment ###
#######################
source activateEnvironment.sh
#######################

######################
#### Set network #####
######################
head_node_ip=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
######################

# Setup env variables for distributed jobs
export MASTER_PORT="${MASTER_PORT:-29555}"
echo "head_node_ip=${head_node_ip}"
echo "MASTER_PORT=${MASTER_PORT}"

INSTANCES_PER_NODE="${INSTANCES_PER_NODE:-1}"

if [[ $SLURM_NNODES == 1 ]] && [[ $INSTANCES_PER_NODE == 1 ]]; then
    export CCL_WORKER_COUNT=0
    LAUNCHER=""
else
    # Setup env variables for distributed jobs
    export CCL_WORKER_COUNT="${CCL_WORKER_COUNT:-2}"
    echo "CCL_WORKER_COUNT=${CCL_WORKER_COUNT}"

    # Write hostfile (one hostname per line)
    HOSTFILE_PATH=hostfile
    scontrol show hostname $SLURM_JOB_NODELIST | perl -ne 'chomp; print "$_\n" x 1' > ${HOSTFILE_PATH}

    export LAUNCHER="accelerate launch \
        --num_processes $((SLURM_NNODES * ${INSTANCES_PER_NODE})) \
        --num_machines $SLURM_NNODES \
        --rdzv_backend c10d \
        --main_process_ip $head_node_ip \
        --main_process_port $MASTER_PORT \
        --mpirun_hostfile $HOSTFILE_PATH \
        --mpirun_ccl $CCL_WORKER_COUNT"
fi

# This step is necessary because accelerate launch does not handle multiline arguments properly
export ACCELERATE_DIR="${ACCELERATE_DIR:-/accelerate}"
export SCRIPT="${ACCELERATE_DIR}/examples/complete_nlp_example.py"
export SCRIPT_ARGS=" \
    --cpu \
    --output_dir ${ACCELERATE_DIR}/examples/output \
    "

# This step is necessary because accelerate launch does not handle multiline arguments properly
export CMD="$LAUNCHER $SCRIPT $SCRIPT_ARGS"

# Print the command
echo $CMD
echo ""

# Run the command
eval $CMD
accelerate/examples/slurm/submit_multicpu.sh/0
{ "file_path": "accelerate/examples/slurm/submit_multicpu.sh", "repo_id": "accelerate", "token_count": 767 }
#!/usr/bin/env python

# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from pathlib import Path

from .config_args import default_config_file, load_config_from_file
from .config_utils import SubcommandHelpFormatter


description = "Update an existing config file with the latest defaults while maintaining the old configuration."


def update_config(args):
    """
    Update an existing config file with the latest defaults while maintaining the old configuration.
    """
    config_file = args.config_file
    if config_file is None and Path(default_config_file).exists():
        config_file = default_config_file
    elif not Path(config_file).exists():
        raise ValueError(f"The passed config file located at {config_file} doesn't exist.")
    config = load_config_from_file(config_file)

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    return config_file


def update_command_parser(parser, parents):
    parser = parser.add_parser("update", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to the config file to update. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment variable `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    parser.set_defaults(func=update_config_command)
    return parser


def update_config_command(args):
    config_file = update_config(args)
    print(f"Successfully updated the configuration file at {config_file}.")
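
# Example CLI usage (a sketch):
#   accelerate config update --config_file path/to/default_config.yaml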
accelerate/src/accelerate/commands/config/update.py/0
{ "file_path": "accelerate/src/accelerate/commands/config/update.py", "repo_id": "accelerate", "token_count": 774 }
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math from types import MethodType from typing import Any, Dict, List, Optional, Tuple, Union from .state import PartialState from .utils import ( calculate_maximum_sizes, convert_bytes, copy_tensor_to_devices, ignorant_find_batch_size, infer_auto_device_map, is_pippy_available, pad_input_tensors, send_to_device, ) def generate_device_map(model, num_processes: int = 1, no_split_module_classes=None, max_memory: dict = None): """ Calculates the device map for `model` with an offset for PiPPy """ if num_processes == 1: return infer_auto_device_map(model, no_split_module_classes=no_split_module_classes, clean_result=False) if max_memory is None: model_size, shared = calculate_maximum_sizes(model) # Split into `n` chunks for each GPU memory = (model_size + shared[0]) / num_processes memory = convert_bytes(memory) value, ending = memory.split(" ") # Add a chunk to deal with potential extra shared memory instances memory = math.ceil(float(value)) * 1.1 memory = f"{memory} {ending}" max_memory = {i: memory for i in range(num_processes)} device_map = infer_auto_device_map( model, max_memory=max_memory, no_split_module_classes=no_split_module_classes, clean_result=False, ) return device_map def find_pippy_batch_size(args, kwargs): found_batch_size = None if args is not None: for arg in args: found_batch_size = ignorant_find_batch_size(arg) if found_batch_size is not None: break if kwargs is not None and found_batch_size is None: for kwarg in kwargs.values(): found_batch_size = ignorant_find_batch_size(kwarg) if found_batch_size is not None: break return found_batch_size def build_pipeline(model, split_points, args, kwargs, num_chunks): """ Attaches the split points to the model based on `self.device_map` and generates a `PipelineStage`. Requires passing in needed `args` and `kwargs` as the model needs on the CPU. Users can pass in custom `num_chunks` as an optional hyper-parameter. 
By default will use `AcceleratorState.num_processes` """ # Note: We import here to reduce import time from general modules, and isolate outside dependencies from torch.distributed.pipelining import ScheduleGPipe, SplitPoint, pipeline # We need to annotate the split points in the model for PiPPy state = PartialState() split_spec = {split_point: SplitPoint.BEGINNING for split_point in split_points} pipe = pipeline( model, mb_args=args, mb_kwargs=kwargs, split_spec=split_spec, ) stage = pipe.build_stage(state.local_process_index, device=state.device) schedule = ScheduleGPipe(stage, num_chunks) return schedule def pippy_forward(forward, num_chunks, gather_output, *args, **kwargs): state = PartialState() output = None if state.num_processes == 1: output = forward(*args, **kwargs) elif state.is_local_main_process: found_batch_size = find_pippy_batch_size(args, kwargs) if found_batch_size is None: raise ValueError("Could not find batch size from args or kwargs") else: if found_batch_size != num_chunks: args = pad_input_tensors(args, found_batch_size, num_chunks) kwargs = pad_input_tensors(kwargs, found_batch_size, num_chunks) forward(*args, **kwargs) elif state.is_last_process: output = forward() else: forward() if gather_output: # Each node will get a copy of the full output which is only on the last GPU output = copy_tensor_to_devices(output) return output def prepare_pippy( model, split_points: Optional[Union[str, List[str]]] = "auto", no_split_module_classes: Optional[List[str]] = None, example_args: Optional[Tuple[Any]] = (), example_kwargs: Optional[Dict[str, Any]] = None, num_chunks: Optional[int] = None, gather_output: Optional[bool] = False, ): """ Wraps `model` for pipeline parallel inference. Args: model (`torch.nn.Module`): A model we want to split for pipeline-parallel inference split_points (`str` or `List[str]`, defaults to 'auto'): How to generate the split points and chunk the model across each GPU. 'auto' will find the best balanced split given any model. Should be a list of layer names in the model to split by otherwise. no_split_module_classes (`List[str]`): A list of class names for layers we don't want to be split. example_args (tuple of model inputs): The expected inputs for the model that uses order-based inputs for a *single process*. Recommended to use this method if possible. example_kwargs (dict of model inputs) The expected inputs for the model that uses dictionary-based inputs for a *single process*. This is a *highly* limiting structure that requires the same keys be present at *all* inference calls. Not recommended unless the prior condition is true for all cases. num_chunks (`int`, defaults to the number of available GPUs): The number of different stages the Pipeline will have. By default it will assign one chunk per GPU, but this can be tuned and played with. In general one should have num_chunks >= num_gpus. gather_output (`bool`, defaults to `False`): If `True`, the output from the last GPU (which holds the true outputs) is sent across to all GPUs. 
""" if not is_pippy_available(): raise ImportError("Using `torch.distributed.pipelining` requires PyTorch 2.4.0 or later.") state = PartialState() example_args = send_to_device(example_args, "cpu") example_kwargs = send_to_device(example_kwargs, "cpu") if num_chunks is None: num_chunks = state.num_processes if split_points == "auto": device_map = generate_device_map(model, num_chunks, no_split_module_classes=no_split_module_classes) split_points = [] for i in range(1, num_chunks): split_points.append(next(k for k, v in device_map.items() if v == i)) model.hf_split_points = split_points stage = build_pipeline(model, split_points, example_args, example_kwargs, num_chunks) model._original_forward = model.forward model._original_call = model.__call__ model.pippy_stage = stage model.hf_split_points = split_points def forward(*args, **kwargs): return pippy_forward(stage.step, num_chunks, gather_output, *args, **kwargs) # To act like a decorator so that it can be popped when doing `extract_model_from_parallel` # Note: creates an infinite recursion loop with `generate` model_forward = MethodType(forward, model) forward.__wrapped__ = model_forward model.forward = forward return model
accelerate/src/accelerate/inference.py/0
{ "file_path": "accelerate/src/accelerate/inference.py", "repo_id": "accelerate", "token_count": 2855 }
# Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import json import os from pathlib import Path import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler MAX_GPU_BATCH_SIZE = 16 EVAL_BATCH_SIZE = 32 def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"): """ Creates a set of `DataLoader`s for the `glue` dataset. Args: accelerator (`Accelerator`): An `Accelerator` object batch_size (`int`, *optional*): The batch size for the train and validation DataLoaders. model_name (`str`, *optional*): """ tokenizer = AutoTokenizer.from_pretrained(model_name) datasets = load_dataset("glue", "mrpc") def tokenize_function(examples): # max_length=None => use the model max length (it's actually the default) outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset tokenized_datasets = datasets.map( tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library tokenized_datasets = tokenized_datasets.rename_column("label", "labels") def collate_fn(examples): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.XLA: return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt") return tokenizer.pad(examples, padding="longest", return_tensors="pt") # Instantiate dataloaders. 
train_dataloader = DataLoader( tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size ) eval_dataloader = DataLoader( tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE ) return train_dataloader, eval_dataloader def training_function(config, args): # Initialize accelerator accelerator = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lr = config["lr"] num_epochs = int(config["num_epochs"]) seed = int(config["seed"]) batch_size = int(config["batch_size"]) model_name = args.model_name_or_path set_seed(seed) train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name) # Instantiate the model (we build the model here so that the seed also control new weights initialization) model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True) if args.add_pad_token: if model.config.pad_token_id is None: model.config.pad_token_id = 0 # Instantiate optimizer optimizer_cls = ( AdamW if accelerator.state.deepspeed_plugin is None or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) optimizer = optimizer_cls(params=model.parameters(), lr=lr) max_training_steps = len(train_dataloader) * num_epochs # Instantiate scheduler linear_decay_scheduler = False if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): lr_scheduler = get_linear_schedule_with_warmup( optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps, ) linear_decay_scheduler = True else: lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader, lr_scheduler ) # We also need to keep track of the stating epoch so files are named properly starting_epoch = 0 # Now we train the model metric = evaluate.load("glue", "mrpc") best_performance = 0 performance_metric = {} expected_lr_after_first_optim_step = lr * ( 1 - 1 / (max_training_steps / accelerator.num_processes / accelerator.gradient_accumulation_steps) ) lr_scheduler_check_completed = False for epoch in range(starting_epoch, num_epochs): model.train() for step, batch in enumerate(train_dataloader): with accelerator.accumulate(model): outputs = model(**batch) loss = outputs.loss accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # assert the learning rate after first optimizer step if ( accelerator.sync_gradients and not lr_scheduler_check_completed and linear_decay_scheduler and accelerator.state.mixed_precision == "no" ): assert ( lr_scheduler.get_last_lr()[0] == expected_lr_after_first_optim_step ), f"Wrong lr found at second step, expected {expected_lr_after_first_optim_step}, got {lr_scheduler.get_last_lr()[0]}" lr_scheduler_check_completed = True model.eval() samples_seen = 0 for step, batch in enumerate(eval_dataloader): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device) with torch.no_grad(): outputs = model(**batch) predictions = outputs.logits.argmax(dim=-1) # It is slightly faster to call this once, than multiple times predictions, references = accelerator.gather( (predictions, batch["labels"]) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(eval_dataloader) - 1: predictions = predictions[: len(eval_dataloader.dataset) - samples_seen] references = references[: len(eval_dataloader.dataset) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=predictions, references=references, ) eval_metric = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"epoch {epoch}:", eval_metric) performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"] if best_performance < eval_metric["accuracy"]: best_performance = eval_metric["accuracy"] # check that the LR is 0 if linear_decay_scheduler and accelerator.state.mixed_precision == "no": assert ( lr_scheduler.get_last_lr()[0] == 0 ), f"Wrong lr found at last step, expected 0, got {lr_scheduler.get_last_lr()[0]}" if args.performance_lower_bound is not None: assert ( args.performance_lower_bound <= best_performance ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}" accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir, "all_results.json"), "w") as f: json.dump(performance_metric, f) # Finally try saving the model accelerator.save_model(model, args.output_dir) accelerator.wait_for_everyone() assert Path( args.output_dir, "model.safetensors" ).exists(), "Model was not saved when calling `Accelerator.save_model`" accelerator.end_training() def main(): parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.") parser.add_argument( "--model_name_or_path", type=str, default="bert-base-cased", help="Path to pretrained model or model identifier from huggingface.co/models.", required=False, ) parser.add_argument( "--output_dir", type=str, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", ) parser.add_argument( "--performance_lower_bound", type=float, default=None, help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.", ) parser.add_argument( "--num_epochs", type=int, default=3, help="Number of train epochs.", ) parser.add_argument( "--add_pad_token", type=bool, default=False, help="To add pad token if not exists.", ) args = parser.parse_args() config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16} training_function(config, args) if __name__ == "__main__": main()
accelerate/src/accelerate/test_utils/scripts/external_deps/test_performance.py/0
{ "file_path": "accelerate/src/accelerate/test_utils/scripts/external_deps/test_performance.py", "repo_id": "accelerate", "token_count": 4147 }
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import operator as op SCALER_NAME = "scaler.pt" MODEL_NAME = "pytorch_model" SAFE_MODEL_NAME = "model" RNG_STATE_NAME = "random_states" OPTIMIZER_NAME = "optimizer" SCHEDULER_NAME = "scheduler" SAMPLER_NAME = "sampler" PROFILE_PATTERN_NAME = "profile_{suffix}.json" WEIGHTS_NAME = f"{MODEL_NAME}.bin" WEIGHTS_PATTERN_NAME = "pytorch_model{suffix}.bin" WEIGHTS_INDEX_NAME = f"{WEIGHTS_NAME}.index.json" SAFE_WEIGHTS_NAME = f"{SAFE_MODEL_NAME}.safetensors" SAFE_WEIGHTS_PATTERN_NAME = "model{suffix}.safetensors" SAFE_WEIGHTS_INDEX_NAME = f"{SAFE_WEIGHTS_NAME}.index.json" SAGEMAKER_PYTORCH_VERSION = "1.10.2" SAGEMAKER_PYTHON_VERSION = "py38" SAGEMAKER_TRANSFORMERS_VERSION = "4.17.0" SAGEMAKER_PARALLEL_EC2_INSTANCES = ["ml.p3.16xlarge", "ml.p3dn.24xlarge", "ml.p4dn.24xlarge"] FSDP_SHARDING_STRATEGY = ["FULL_SHARD", "SHARD_GRAD_OP", "NO_SHARD", "HYBRID_SHARD", "HYBRID_SHARD_ZERO2"] FSDP_AUTO_WRAP_POLICY = ["TRANSFORMER_BASED_WRAP", "SIZE_BASED_WRAP", "NO_WRAP"] FSDP_BACKWARD_PREFETCH = ["BACKWARD_PRE", "BACKWARD_POST", "NO_PREFETCH"] FSDP_STATE_DICT_TYPE = ["FULL_STATE_DICT", "LOCAL_STATE_DICT", "SHARDED_STATE_DICT"] FSDP_PYTORCH_VERSION = ( "2.1.0.a0+32f93b1" # Technically should be 2.1.0, but MS-AMP uses this specific prerelease in their Docker image. ) FSDP_MODEL_NAME = "pytorch_model_fsdp" DEEPSPEED_MULTINODE_LAUNCHERS = ["pdsh", "standard", "openmpi", "mvapich", "mpich"] TORCH_DYNAMO_MODES = ["default", "reduce-overhead", "max-autotune"] ELASTIC_LOG_LINE_PREFIX_TEMPLATE_PYTORCH_VERSION = "2.2.0" XPU_PROFILING_AVAILABLE_PYTORCH_VERSION = "2.4.0" MITA_PROFILING_AVAILABLE_PYTORCH_VERSION = "2.1.0" BETA_TP_AVAILABLE_PYTORCH_VERSION = "2.3.0" BETA_TP_AVAILABLE_TRANSFORMERS_VERSION = "4.47.0" STR_OPERATION_TO_FUNC = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt} # These are the args for `torch.distributed.launch` for pytorch < 1.9 TORCH_LAUNCH_PARAMS = [ "nnodes", "nproc_per_node", "rdzv_backend", "rdzv_endpoint", "rdzv_id", "rdzv_conf", "standalone", "max_restarts", "monitor_interval", "start_method", "role", "module", "m", "no_python", "run_path", "log_dir", "r", "redirects", "t", "tee", "node_rank", "master_addr", "master_port", ] CUDA_DISTRIBUTED_TYPES = ["DEEPSPEED", "MULTI_GPU", "FSDP", "MEGATRON_LM", "TP"] TORCH_DISTRIBUTED_OPERATION_TYPES = CUDA_DISTRIBUTED_TYPES + [ "MULTI_NPU", "MULTI_MLU", "MULTI_MUSA", "MULTI_XPU", "MULTI_CPU", ]
accelerate/src/accelerate/utils/constants.py/0
{ "file_path": "accelerate/src/accelerate/utils/constants.py", "repo_id": "accelerate", "token_count": 1404 }
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .imports import is_tqdm_available if is_tqdm_available(): from tqdm.auto import tqdm as _tqdm from ..state import PartialState def tqdm(*args, main_process_only: bool = True, **kwargs): """ Wrapper around `tqdm.tqdm` that optionally displays only on the main process. Args: main_process_only (`bool`, *optional*): Whether to display the progress bar only on the main process """ if not is_tqdm_available(): raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.") if len(args) > 0 and isinstance(args[0], bool): raise ValueError( "Passing `True` or `False` as the first argument to Accelerate's `tqdm` wrapper is unsupported. " "Please use the `main_process_only` keyword argument instead." ) disable = kwargs.pop("disable", False) if main_process_only and not disable: disable = PartialState().local_process_index != 0 return _tqdm(*args, **kwargs, disable=disable)
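
# Example usage (a sketch):
#   from accelerate.utils import tqdm
#
#   for batch in tqdm(dataloader, main_process_only=True, desc="Training"):
#       ...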
accelerate/src/accelerate/utils/tqdm.py/0
{ "file_path": "accelerate/src/accelerate/utils/tqdm.py", "repo_id": "accelerate", "token_count": 553 }
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import unittest import torch from accelerate import Accelerator from accelerate.big_modeling import dispatch_model from accelerate.test_utils import ( DEFAULT_LAUNCH_COMMAND, assert_exception, device_count, execute_subprocess_async, get_launch_command, path_in_accelerate_package, require_huggingface_suite, require_multi_device, require_multi_gpu, require_non_torch_xla, require_non_xpu, require_pippy, require_torchvision, torch_device, ) from accelerate.utils import patch_environment class MultiDeviceTester(unittest.TestCase): test_file_path = path_in_accelerate_package("test_utils", "scripts", "test_script.py") data_loop_file_path = path_in_accelerate_package("test_utils", "scripts", "test_distributed_data_loop.py") operation_file_path = path_in_accelerate_package("test_utils", "scripts", "test_ops.py") pippy_file_path = path_in_accelerate_package("test_utils", "scripts", "external_deps", "test_pippy.py") merge_weights_file_path = path_in_accelerate_package("test_utils", "scripts", "test_merge_weights.py") @require_multi_device def test_multi_device(self): print(f"Found {device_count} devices.") cmd = DEFAULT_LAUNCH_COMMAND + [self.test_file_path] with patch_environment(omp_num_threads=1): execute_subprocess_async(cmd) @require_multi_device def test_multi_device_ops(self): print(f"Found {device_count} devices.") cmd = DEFAULT_LAUNCH_COMMAND + [self.operation_file_path] with patch_environment(omp_num_threads=1): execute_subprocess_async(cmd) @require_multi_device def test_pad_across_processes(self): print(f"Found {device_count} devices.") cmd = DEFAULT_LAUNCH_COMMAND + [inspect.getfile(self.__class__)] with patch_environment(omp_num_threads=1): execute_subprocess_async(cmd) @require_multi_device def test_multi_device_merge_fsdp_weights(self): print(f"Found {device_count} devices.") cmd = DEFAULT_LAUNCH_COMMAND + [self.merge_weights_file_path] with patch_environment(omp_num_threads=1): execute_subprocess_async(cmd) @require_non_torch_xla @require_multi_device def test_distributed_data_loop(self): """ This TestCase checks the behaviour that occurs during distributed training or evaluation, when the batch size does not evenly divide the dataset size. 
""" print(f"Found {device_count} devices, using 2 devices only") cmd = get_launch_command(num_processes=2) + [self.data_loop_file_path] env_kwargs = dict(omp_num_threads=1) if torch_device == "xpu": env_kwargs.update(ze_affinity_mask="0,1") elif torch_device == "npu": env_kwargs.update(ascend_rt_visible_devices="0,1") elif torch_device == "mlu": env_kwargs.update(mlu_visible_devices="0,1") else: env_kwargs.update(cuda_visible_devices="0,1") with patch_environment(**env_kwargs): execute_subprocess_async(cmd) @require_non_xpu @require_multi_gpu @require_pippy @require_torchvision @require_huggingface_suite def test_pippy(self): """ Checks the integration with the pippy framework """ print(f"Found {device_count} devices") cmd = get_launch_command(multi_gpu=True, num_processes=device_count) + [self.pippy_file_path] with patch_environment(omp_num_threads=1): execute_subprocess_async(cmd) if __name__ == "__main__": accelerator = Accelerator() shape = (accelerator.state.process_index + 2, 10) tensor = torch.randint(0, 10, shape).to(accelerator.device) error_msg = "" tensor1 = accelerator.pad_across_processes(tensor) if tensor1.shape[0] != accelerator.state.num_processes + 1: error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0." if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor): error_msg += "Tensors have different values." if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0): error_msg += "Padding was not done with the right value (0)." tensor2 = accelerator.pad_across_processes(tensor, pad_first=True) if tensor2.shape[0] != accelerator.state.num_processes + 1: error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0." index = accelerator.state.num_processes - accelerator.state.process_index - 1 if not torch.equal(tensor2[index:], tensor): error_msg += "Tensors have different values." if not torch.all(tensor2[:index] == 0): error_msg += "Padding was not done with the right value (0)." # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg) # Check device_map accelerator.print("Test `device_map` cannot be prepared.") class ModelForTest(torch.nn.Module): def __init__(self): super().__init__() self.linear1 = torch.nn.Linear(3, 4) self.batchnorm = torch.nn.BatchNorm1d(4) self.linear2 = torch.nn.Linear(4, 5) def forward(self, x): return self.linear2(self.batchnorm(self.linear1(x))) device_map = {"linear1": 0, "batchnorm": "cpu", "linear2": 1} model = ModelForTest() dispatch_model(model, device_map=device_map) with assert_exception(ValueError, "You can't train a model that has been loaded with"): model = accelerator.prepare_model(model)
accelerate/tests/test_multigpu.py/0
{ "file_path": "accelerate/tests/test_multigpu.py", "repo_id": "accelerate", "token_count": 2501 }
# Copyright 2022 The HuggingFace Team, the AllenNLP library authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Script to close stale issue. Taken in part from the AllenNLP repository. https://github.com/allenai/allennlp. """ import os from datetime import datetime as dt from datetime import timezone from github import Github LABELS_TO_EXEMPT = [ "good first issue", "feature request", "wip", ] def main(): g = Github(os.environ["GITHUB_TOKEN"]) repo = g.get_repo("huggingface/accelerate") open_issues = repo.get_issues(state="open") for issue in open_issues: comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True) last_comment = comments[0] if len(comments) > 0 else None current_time = dt.now(timezone.utc) days_since_updated = (current_time - issue.updated_at).days days_since_creation = (current_time - issue.created_at).days if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and days_since_updated > 7 and days_since_creation >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels()) ): # Close issue since it has been 7 days of inactivity since bot mention. issue.edit(state="closed") elif ( days_since_updated > 23 and days_since_creation >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels()) ): # Add stale comment issue.create_comment( "This issue has been automatically marked as stale because it has not had " "recent activity. If you think this still needs to be addressed " "please comment on this thread.\n\nPlease note that issues that do not follow the " "[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) " "are likely to be ignored." ) if __name__ == "__main__": main()
accelerate/utils/stale.py/0
{ "file_path": "accelerate/utils/stale.py", "repo_id": "accelerate", "token_count": 1013 }
[package]
name = "candle-book"
version.workspace = true
edition.workspace = true
description.workspace = true
repository.workspace = true
keywords.workspace = true
categories.workspace = true
license.workspace = true
readme = "README.md"

[dependencies]
accelerate-src = { workspace = true, optional = true }
candle = { workspace = true }
candle-datasets = { workspace = true }
candle-nn = { workspace = true }
candle-transformers = { workspace = true }
candle-flash-attn = { workspace = true, optional = true }
safetensors = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
num-traits = { workspace = true }
intel-mkl-src = { workspace = true, optional = true }
cudarc = { workspace = true, optional = true }
half = { workspace = true, optional = true }
image = { workspace = true, optional = true }
anyhow = { workspace = true }
tokio = "1.43.0"

[dev-dependencies]
byteorder = { workspace = true }
hf-hub = { workspace = true, features = ["tokio"] }
clap = { workspace = true }
memmap2 = { workspace = true }
rand = { workspace = true }
tokenizers = { workspace = true, features = ["onig"] }
tracing = { workspace = true }
tracing-chrome = { workspace = true }
tracing-subscriber = { workspace = true }
# Necessary to disambiguate with tokio in wasm examples which are 1.28.1
parquet = { workspace = true }
image = { workspace = true }

[build-dependencies]
anyhow = { workspace = true }

[features]
default = []
candle/candle-book/Cargo.toml/0
{ "file_path": "candle/candle-book/Cargo.toml", "repo_id": "candle", "token_count": 459 }
# Installation

**With Cuda support**:

1. First, make sure that Cuda is correctly installed.
- `nvcc --version` should print information about your Cuda compiler driver.
- `nvidia-smi --query-gpu=compute_cap --format=csv` should print your GPU's compute capability, e.g. something like:

```bash
compute_cap
8.9
```

You can also compile the Cuda kernels for a specific compute cap using the `CUDA_COMPUTE_CAP=<compute cap>` environment variable.

If any of the above commands errors out, please make sure to update your Cuda version.

2. Create a new app and add [`candle-core`](https://github.com/huggingface/candle/tree/main/candle-core) with Cuda support.

Start by creating a new cargo project:

```bash
cargo new myapp
cd myapp
```

Make sure to add the `candle-core` crate with the cuda feature:

```bash
cargo add --git https://github.com/huggingface/candle.git candle-core --features "cuda"
```

Run `cargo build` to make sure everything can be correctly built.

```bash
cargo build
```

**Without Cuda support**:

Create a new app and add [`candle-core`](https://github.com/huggingface/candle/tree/main/candle-core) as follows:

```bash
cargo new myapp
cd myapp
cargo add --git https://github.com/huggingface/candle.git candle-core
```

Finally, run `cargo build` to make sure everything can be correctly built.

```bash
cargo build
```

**With mkl support**:

You can also enable the `mkl` feature, which can provide faster inference on CPU.

[Using mkl](./advanced/mkl.md)
candle/candle-book/src/guide/installation.md/0
{ "file_path": "candle/candle-book/src/guide/installation.md", "repo_id": "candle", "token_count": 487 }
mod benchmarks;

use criterion::criterion_main;

criterion_main!(
    benchmarks::affine::benches,
    benchmarks::matmul::benches,
    benchmarks::random::benches,
    benchmarks::reduce::benches,
    benchmarks::where_cond::benches,
    benchmarks::conv_transpose2d::benches,
    benchmarks::qmatmul::benches,
    benchmarks::unary::benches
);
candle/candle-core/benches/bench_main.rs/0
{ "file_path": "candle/candle-core/benches/bench_main.rs", "repo_id": "candle", "token_count": 126 }
//! Methods for backpropagation of gradients. use crate::op::{BinaryOp, Op, ReduceOp, UnaryOp}; use crate::{Error, Result, Tensor, TensorId}; use std::collections::HashMap; // arg has been reduced to node via reduce_dims, expand it back to arg. // This has to handle keepdims. fn broadcast_back(arg: &Tensor, node: &Tensor, reduced_dims: &[usize]) -> Result<Tensor> { if arg.rank() == node.rank() { // keepdim = true node.broadcast_as(arg.shape()) } else { // keepdim = false // first expand the reduced dims. node.reshape(reduced_dims)?.broadcast_as(arg.shape()) } } thread_local! { static CANDLE_GRAD_DO_NOT_DETACH: bool = { match std::env::var("CANDLE_GRAD_DO_NOT_DETACH") { Ok(s) => { !s.is_empty() && s != "0" }, Err(_) => false, } } } impl Tensor { /// Return all the nodes that lead to this value in a topologically sorted vec, the first /// elements having dependencies on the latter ones, e.g. the first element if any is the /// argument. /// This assumes that the op graph is a DAG. fn sorted_nodes(&self) -> Vec<&Tensor> { // The vec of sorted nodes is passed as an owned value rather than a mutable reference // to get around some lifetime limitations. fn walk<'a>( node: &'a Tensor, nodes: Vec<&'a Tensor>, already_seen: &mut HashMap<TensorId, bool>, ) -> (bool, Vec<&'a Tensor>) { if let Some(&tg) = already_seen.get(&node.id()) { return (tg, nodes); } let mut track_grad = false; let mut nodes = if node.is_variable() { // Do not call recursively on the "leaf" nodes. track_grad = true; nodes } else if node.dtype().is_int() { nodes } else if let Some(op) = node.op() { match op { Op::IndexAdd(t1, t2, t3, _) | Op::ScatterAdd(t1, t2, t3, _) | Op::CustomOp3(t1, t2, t3, _) | Op::WhereCond(t1, t2, t3) => { let (tg, nodes) = walk(t1, nodes, already_seen); track_grad |= tg; let (tg, nodes) = walk(t2, nodes, already_seen); track_grad |= tg; let (tg, nodes) = walk(t3, nodes, already_seen); track_grad |= tg; nodes } Op::Conv1D { arg: lhs, kernel: rhs, .. } | Op::ConvTranspose1D { arg: lhs, kernel: rhs, .. } | Op::Conv2D { arg: lhs, kernel: rhs, .. } | Op::ConvTranspose2D { arg: lhs, kernel: rhs, .. } | Op::CustomOp2(lhs, rhs, _) | Op::Binary(lhs, rhs, _) | Op::Gather(lhs, rhs, _) | Op::IndexSelect(lhs, rhs, _) | Op::Matmul(lhs, rhs) | Op::SliceScatter0(lhs, rhs, _) => { let (tg, nodes) = walk(lhs, nodes, already_seen); track_grad |= tg; let (tg, nodes) = walk(rhs, nodes, already_seen); track_grad |= tg; nodes } Op::Cat(args, _) => args.iter().fold(nodes, |nodes, arg| { let (tg, nodes) = walk(arg, nodes, already_seen); track_grad |= tg; nodes }), Op::Affine { arg, mul, .. } => { if *mul == 0. { nodes } else { let (tg, nodes) = walk(arg, nodes, already_seen); track_grad |= tg; nodes } } Op::Unary(_node, UnaryOp::Ceil) | Op::Unary(_node, UnaryOp::Floor) | Op::Unary(_node, UnaryOp::Round) | Op::Unary(_node, UnaryOp::Sign) => nodes, Op::Reshape(node) | Op::UpsampleNearest1D { arg: node, .. } | Op::UpsampleNearest2D { arg: node, .. } | Op::AvgPool2D { arg: node, .. } | Op::MaxPool2D { arg: node, .. 
} | Op::Copy(node) | Op::Broadcast(node) | Op::Cmp(node, _) | Op::Reduce(node, ReduceOp::Min | ReduceOp::Sum | ReduceOp::Max, _) | Op::ToDevice(node) | Op::Transpose(node, _, _) | Op::Permute(node, _) | Op::Narrow(node, _, _, _) | Op::Unary(node, _) | Op::Elu(node, _) | Op::Powf(node, _) | Op::CustomOp1(node, _) => { let (tg, nodes) = walk(node, nodes, already_seen); track_grad |= tg; nodes } Op::ToDType(node) => { if node.dtype().is_float() { let (tg, nodes) = walk(node, nodes, already_seen); track_grad |= tg; nodes } else { nodes } } Op::Reduce(_, ReduceOp::ArgMin | ReduceOp::ArgMax, _) => nodes, } } else { nodes }; already_seen.insert(node.id(), track_grad); if track_grad { nodes.push(node); } (track_grad, nodes) } let (_tg, mut nodes) = walk(self, vec![], &mut HashMap::new()); nodes.reverse(); nodes } pub fn backward(&self) -> Result<GradStore> { let sorted_nodes = self.sorted_nodes(); let mut grads = GradStore::new(); grads.insert(self, self.ones_like()?.contiguous()?); for node in sorted_nodes.iter() { if node.is_variable() { continue; } let grad = grads .remove(node) .expect("candle internal error - grad not populated"); // https://github.com/huggingface/candle/issues/1241 // Ideally, we would make these operations in place where possible to ensure that we // do not have to allocate too often. Here we just call `.detach` to avoid computing // the backprop graph of the backprop itself. This would be an issue for second order // derivatives but these are out of scope at the moment. let do_not_detach = CANDLE_GRAD_DO_NOT_DETACH.with(|b| *b); let grad = if do_not_detach { grad } else { grad.detach() }; if let Some(op) = node.op() { match op { Op::Binary(lhs, rhs, BinaryOp::Add) => { let lhs_sum_grad = grads.or_insert(lhs)?; *lhs_sum_grad = lhs_sum_grad.add(&grad)?; let rhs_sum_grad = grads.or_insert(rhs)?; *rhs_sum_grad = rhs_sum_grad.add(&grad)?; } Op::Binary(lhs, rhs, BinaryOp::Sub) => { let lhs_sum_grad = grads.or_insert(lhs)?; *lhs_sum_grad = lhs_sum_grad.add(&grad)?; let rhs_sum_grad = grads.or_insert(rhs)?; *rhs_sum_grad = rhs_sum_grad.sub(&grad)?; } Op::Binary(lhs, rhs, BinaryOp::Mul) => { let lhs_grad = grad.mul(rhs)?; let lhs_sum_grad = grads.or_insert(lhs)?; *lhs_sum_grad = lhs_sum_grad.add(&lhs_grad)?; let rhs_grad = grad.mul(lhs)?; let rhs_sum_grad = grads.or_insert(rhs)?; *rhs_sum_grad = rhs_sum_grad.add(&rhs_grad)?; } Op::Binary(lhs, rhs, BinaryOp::Div) => { let lhs_grad = grad.div(rhs)?; let lhs_sum_grad = grads.or_insert(lhs)?; *lhs_sum_grad = lhs_sum_grad.add(&lhs_grad)?; let rhs_grad = grad.mul(lhs)?.div(&rhs.sqr()?)?; let rhs_sum_grad = grads.or_insert(rhs)?; *rhs_sum_grad = rhs_sum_grad.sub(&rhs_grad)?; } Op::Binary(lhs, rhs, BinaryOp::Minimum) | Op::Binary(lhs, rhs, BinaryOp::Maximum) => { let mask_lhs = node.eq(lhs)?.to_dtype(grad.dtype())?; let mask_rhs = node.eq(rhs)?.to_dtype(grad.dtype())?; // If both masks are 1 one the same point, we want to scale the // gradient by 0.5 rather than 1. 
let lhs_grad = mask_lhs.mul(&grad)?.div(&(&mask_rhs + 1.)?)?; let lhs_sum_grad = grads.or_insert(lhs)?; *lhs_sum_grad = lhs_sum_grad.add(&lhs_grad)?; let rhs_grad = mask_rhs.mul(&grad)?.div(&(&mask_lhs + 1.)?)?; let rhs_sum_grad = grads.or_insert(rhs)?; *rhs_sum_grad = rhs_sum_grad.add(&rhs_grad)?; } Op::WhereCond(pred, t, f) => { let zeros = grad.zeros_like()?; let t_sum_grad = grads.or_insert(t)?; let t_grad = pred.where_cond(&grad, &zeros)?; *t_sum_grad = t_sum_grad.add(&t_grad)?; let f_sum_grad = grads.or_insert(f)?; let f_grad = pred.where_cond(&zeros, &grad)?; *f_sum_grad = f_sum_grad.add(&f_grad)?; } Op::Conv1D { arg, kernel, padding, stride, dilation, } => { // The output height for conv_transpose1d is: // (l_in - 1) * stride - 2 * padding + dilation * (k_size - 1) + out_padding + 1 let grad_l_in = grad.dim(2)?; let k_size = kernel.dim(2)?; let out_size = (grad_l_in - 1) * stride + dilation * (k_size - 1) + 1 - 2 * padding; let out_padding = arg.dim(2)? - out_size; let grad_arg = grad.conv_transpose1d( kernel, *padding, out_padding, *stride, *dilation, /* groups */ 1, )?; let sum_grad = grads.or_insert(arg)?; *sum_grad = sum_grad.add(&grad_arg)?; let grad_kernel = arg .transpose(0, 1)? .conv1d(&grad.transpose(0, 1)?, *padding, *dilation, *stride, 1)? .transpose(0, 1)?; let sum_grad = grads.or_insert(kernel)?; let (_, _, k0) = kernel.dims3()?; let (_, _, g_k0) = grad_kernel.dims3()?; let grad_kernel = if g_k0 != k0 { grad_kernel.narrow(2, 0, k0)? } else { grad_kernel }; *sum_grad = sum_grad.add(&grad_kernel)?; } Op::Conv2D { arg, kernel, padding, stride, dilation, } => { // The output height for conv_transpose2d is: // (i_h - 1) * stride - 2 * padding + dilation * (k_h - 1) + out_padding + 1 let grad_h = grad.dim(2)?; let k_h = kernel.dim(2)?; let out_size = (grad_h - 1) * stride + dilation * (k_h - 1) + 1 - 2 * padding; let out_padding = arg.dim(2)? - out_size; let grad_arg = grad.conv_transpose2d( kernel, *padding, out_padding, *stride, *dilation, )?; let sum_grad = grads.or_insert(arg)?; *sum_grad = sum_grad.add(&grad_arg)?; let grad_kernel = arg .transpose(0, 1)? .conv2d(&grad.transpose(0, 1)?, *padding, *dilation, *stride, 1)? .transpose(0, 1)?; let sum_grad = grads.or_insert(kernel)?; let (_, _, k0, k1) = kernel.dims4()?; let (_, _, g_k0, g_k1) = grad_kernel.dims4()?; let grad_kernel = if g_k0 != k0 || g_k1 != k1 { grad_kernel.narrow(2, 0, k0)?.narrow(3, 0, k1)? } else { grad_kernel }; *sum_grad = sum_grad.add(&grad_kernel)?; } Op::ConvTranspose1D { .. } => Err(Error::BackwardNotSupported { op: "conv-transpose1d", })?, Op::ConvTranspose2D { arg, kernel, padding, stride, dilation, output_padding: _output_padding, } => { let grad_arg = grad.conv2d(kernel, *padding, *stride, *dilation, 1)?; let sum_grad = grads.or_insert(arg)?; *sum_grad = sum_grad.add(&grad_arg)?; let grad_kernel = grad .transpose(0, 1)? .conv2d(&arg.transpose(0, 1)?, *padding, *dilation, *stride, 1)? .transpose(0, 1)?; let sum_grad = grads.or_insert(kernel)?; let (_, _, k0, k1) = kernel.dims4()?; let (_, _, g_k0, g_k1) = grad_kernel.dims4()?; let grad_kernel = if g_k0 != k0 || g_k1 != k1 { grad_kernel.narrow(2, 0, k0)?.narrow(3, 0, k1)? 
} else { grad_kernel }; *sum_grad = sum_grad.add(&grad_kernel)?; } Op::AvgPool2D { arg, kernel_size, stride, } => { if kernel_size != stride { crate::bail!("backward not supported for avgpool2d if ksize {kernel_size:?} != stride {stride:?}") } let (_n, _c, h, w) = arg.dims4()?; let grad_arg = grad.upsample_nearest2d(h, w)?; let grad_arg = (grad_arg * (1f64 / (kernel_size.0 * kernel_size.1) as f64))?; let sum_grad = grads.or_insert(arg)?; *sum_grad = sum_grad.add(&grad_arg)?; } Op::MaxPool2D { arg, kernel_size, stride, } => { if kernel_size != stride { crate::bail!("backward not supported for maxpool2d if ksize {kernel_size:?} != stride {stride:?}") } let (_n, _c, h, w) = arg.dims4()?; // For computing the max-pool gradient, we compute a mask where a 1 means // that the element is the maximum, then we apply this mask to the // upsampled gradient (taking into account that multiple max may exist so // we scale the gradient for this case). let node_upsampled = node.upsample_nearest2d(h, w)?; let mask = arg.eq(&node_upsampled)?.to_dtype(arg.dtype())?; let avg = mask.avg_pool2d_with_stride(*kernel_size, *stride)?; let grad_arg = ((grad * avg)?.upsample_nearest2d(h, w)? * mask)?; let sum_grad = grads.or_insert(arg)?; *sum_grad = sum_grad.add(&grad_arg)?; } Op::UpsampleNearest1D { arg, target_size } => { let (_n, c, size) = arg.dims3()?; if target_size % size != 0 { crate::bail!("backward not supported for non integer upscaling factors") } let scale = target_size / size; let kernel = Tensor::ones((c, 1, scale), arg.dtype(), arg.device())?; let conv_sum = grad.conv1d(&kernel, 0, scale, 1, c)?; let sum_grad = grads.or_insert(arg)?; *sum_grad = conv_sum; } Op::UpsampleNearest2D { arg, target_h, target_w, } => { let (_n, c, h, w) = arg.dims4()?; if target_h % h != 0 || target_w % w != 0 { crate::bail!("backward not supported for non integer upscaling factors") } let scale_h = target_h / h; let scale_w = target_w / w; if scale_h != scale_w { crate::bail!("backward not supported for non uniform upscaling factors") }; let kernel = Tensor::ones((c, 1, scale_h, scale_w), arg.dtype(), arg.device())?; let conv_sum = grad.conv2d(&kernel, 0, scale_h, 1, c)?; let sum_grad = grads.or_insert(arg)?; *sum_grad = conv_sum; } Op::SliceScatter0(lhs, rhs, start_rhs) => { let rhs_sum_grad = grads.or_insert(rhs)?; let rhs_grad = grad.narrow(0, *start_rhs, rhs.dim(0)?)?; *rhs_sum_grad = rhs_sum_grad.add(&rhs_grad)?; let lhs_sum_grad = grads.or_insert(lhs)?; let lhs_grad = grad.slice_scatter0(&rhs.zeros_like()?, *start_rhs)?; *lhs_sum_grad = lhs_sum_grad.add(&lhs_grad)? } Op::Gather(arg, indexes, dim) => { let sum_grad = grads.or_insert(arg)?; *sum_grad = sum_grad.scatter_add(indexes, &grad, *dim)?; } Op::ScatterAdd(init, indexes, src, dim) => { let init_sum_grad = grads.or_insert(init)?; *init_sum_grad = init_sum_grad.add(&grad)?; let src_grad = grad.gather(indexes, *dim)?; let src_sum_grad = grads.or_insert(src)?; *src_sum_grad = src_sum_grad.add(&src_grad)?; } Op::IndexAdd(init, indexes, src, dim) => { let init_sum_grad = grads.or_insert(init)?; *init_sum_grad = init_sum_grad.add(&grad)?; let src_grad = grad.index_select(indexes, *dim)?; let src_sum_grad = grads.or_insert(src)?; *src_sum_grad = src_sum_grad.add(&src_grad)?; } Op::IndexSelect(arg, indexes, dim) => { let sum_grad = grads.or_insert(arg)?; *sum_grad = sum_grad.index_add(indexes, &grad, *dim)?; } Op::Matmul(lhs, rhs) => { // Skipping checks, the op went ok, we can skip // the matmul size checks for now. 
let lhs_grad = grad.matmul(&rhs.t()?)?; let lhs_sum_grad = grads.or_insert(lhs)?; *lhs_sum_grad = lhs_sum_grad.add(&lhs_grad)?; let rhs_grad = lhs.t()?.matmul(&grad)?; let rhs_sum_grad = grads.or_insert(rhs)?; *rhs_sum_grad = rhs_sum_grad.add(&rhs_grad)?; } Op::Cat(args, dim) => { let mut start_idx = 0; for arg in args { let len = arg.dims()[*dim]; let arg_grad = grad.narrow(*dim, start_idx, len)?; let sum_grad = grads.or_insert(arg)?; *sum_grad = sum_grad.add(&arg_grad)?; start_idx += len; } } Op::Broadcast(arg) => { let arg_dims = arg.dims(); let node_dims = node.dims(); // The number of dims that have been inserted on the left. let left_dims = node_dims.len() - arg_dims.len(); let mut sum_dims: Vec<usize> = (0..left_dims).collect(); for (dim, (node_dim, arg_dim)) in node_dims[left_dims..] .iter() .zip(arg_dims.iter()) .enumerate() { if node_dim != arg_dim { sum_dims.push(dim + left_dims) } } let mut arg_grad = grad.sum_keepdim(sum_dims.as_slice())?; for _i in 0..left_dims { arg_grad = arg_grad.squeeze(0)? } let sum_grad = grads.or_insert(arg)?; *sum_grad = sum_grad.add(&arg_grad.broadcast_as(sum_grad.dims())?)?; } Op::Reduce(arg, ReduceOp::Sum, reduced_dims) => { let grad = broadcast_back(arg, &grad, reduced_dims)?; let sum_grad = grads.or_insert(arg)?; *sum_grad = sum_grad.add(&grad)?; } Op::Reduce(arg, ReduceOp::Max, reduced_dims) => { let node = broadcast_back(arg, node, reduced_dims)?; let grad = broadcast_back(arg, &grad, reduced_dims)?; let grad = node.eq(arg)?.to_dtype(grad.dtype())?.mul(&grad)?; let sum_grad = grads.or_insert(arg)?; *sum_grad = sum_grad.add(&grad.broadcast_as(sum_grad.dims())?)?; } Op::Reduce(arg, ReduceOp::Min, reduced_dims) => { let node = broadcast_back(arg, node, reduced_dims)?; let grad = broadcast_back(arg, &grad, reduced_dims)?; let grad = node.eq(arg)?.to_dtype(grad.dtype())?.mul(&grad)?; let sum_grad = grads.or_insert(arg)?; *sum_grad = sum_grad.add(&grad.broadcast_as(sum_grad.dims())?)?; } Op::ToDType(arg) => { let sum_grad = grads.or_insert(arg)?; *sum_grad = sum_grad.add(&grad.to_dtype(arg.dtype())?)? } Op::Copy(arg) => { let sum_grad = grads.or_insert(arg)?; *sum_grad = sum_grad.add(&grad)? } Op::Affine { arg, mul, .. } => { let arg_grad = grad.affine(*mul, 0.)?; let sum_grad = grads.or_insert(arg)?; *sum_grad = sum_grad.add(&arg_grad)? } Op::Unary(arg, UnaryOp::Log) => { let sum_grad = grads.or_insert(arg)?; *sum_grad = sum_grad.add(&(grad / arg)?)? } Op::Unary(arg, UnaryOp::Sin) => { let sum_grad = grads.or_insert(arg)?; *sum_grad = sum_grad.add(&(&grad * arg.cos())?)? } Op::Unary(arg, UnaryOp::Cos) => { let sum_grad = grads.or_insert(arg)?; *sum_grad = sum_grad.sub(&(&grad * arg.sin())?)? } Op::Unary(arg, UnaryOp::Tanh) => { let sum_grad = grads.or_insert(arg)?; let minus_dtanh = (node.sqr()? - 1.)?; *sum_grad = sum_grad.sub(&(&grad * &minus_dtanh)?)? } Op::Unary(arg, UnaryOp::Abs) => { let sum_grad = grads.or_insert(arg)?; let ones = arg.ones_like()?; let abs_grad = arg.ge(&arg.zeros_like()?)?.where_cond(&ones, &ones.neg()?); *sum_grad = sum_grad.add(&(&grad * abs_grad)?)? } Op::Unary(arg, UnaryOp::Exp) => { let sum_grad = grads.or_insert(arg)?; *sum_grad = sum_grad.add(&(&grad * *node)?)? } Op::Unary(arg, UnaryOp::Neg) => { let sum_grad = grads.or_insert(arg)?; *sum_grad = sum_grad.sub(&grad)? } Op::Unary(arg, UnaryOp::Recip) => { let sum_grad = grads.or_insert(arg)?; let grad = (grad / arg.sqr()?)?; *sum_grad = sum_grad.sub(&grad)? 
} &Op::Narrow(ref arg, dim, start_idx, len) => { let arg_dims = arg.dims(); let left_pad = if start_idx == 0 { None } else { let mut dims = arg_dims.to_vec(); dims[dim] = start_idx; Some(Tensor::zeros(dims, grad.dtype(), grad.device())?) }; let right_pad = arg_dims[dim] - start_idx - len; let right_pad = if right_pad == 0 { None } else { let mut dims = arg_dims.to_vec(); dims[dim] = right_pad; Some(Tensor::zeros(dims, grad.dtype(), grad.device())?) }; let arg_grad = match (left_pad, right_pad) { (None, None) => grad, (Some(l), None) => Tensor::cat(&[&l, &grad], dim)?, (None, Some(r)) => Tensor::cat(&[&grad, &r], dim)?, (Some(l), Some(r)) => Tensor::cat(&[&l, &grad, &r], dim)?, }; let sum_grad = grads.or_insert(arg)?; *sum_grad = sum_grad.add(&arg_grad)? } Op::Unary(_, UnaryOp::Floor) | Op::Unary(_, UnaryOp::Round) | Op::Reduce(_, ReduceOp::ArgMin, _) | Op::Reduce(_, ReduceOp::ArgMax, _) | Op::Unary(_, UnaryOp::Sign) | Op::Cmp(_, _) => {} Op::Reshape(arg) => { let arg_grad = grad.reshape(arg.dims())?; let sum_grad = grads.or_insert(arg)?; *sum_grad = sum_grad.add(&arg_grad)? } Op::Unary(_, UnaryOp::Ceil) => Err(Error::BackwardNotSupported { op: "ceil" })?, Op::Unary(arg, UnaryOp::Gelu) => { let sum_grad = grads.or_insert(arg)?; let cube = arg.powf(3.)?; let tanh = (0.0356774 * &cube + (0.797885 * arg)?)?.tanh()?; let gelu_grad = (((0.5 * &tanh)? + (0.0535161 * cube + (0.398942 * arg)?)? * (1. - tanh.powf(2.)?))? + 0.5)?; *sum_grad = sum_grad.add(&(&grad * gelu_grad)?)? } Op::Unary(arg, UnaryOp::Erf) => { let sum_grad = grads.or_insert(arg)?; // d/dx erf(x) = 2/sqrt(pi) * e^(-x^2) let erf_grad = (2. / std::f64::consts::PI.sqrt()) * (arg.sqr()?.neg()?).exp()?; *sum_grad = sum_grad.add(&(&grad * erf_grad)?)? } Op::Unary(arg, UnaryOp::GeluErf) => { let sum_grad = grads.or_insert(arg)?; // d/dx gelu_erf(x) = 0.5 + 0.398942 e^(-x^2/2) x + 0.5 erf(x/sqrt(2)) let neg_half_square = (arg.sqr()?.neg()? / 2.)?; let scaled_exp_arg = (0.398942 * neg_half_square.exp()? * arg)?; let arg_scaled_sqrt = (arg / 2f64.sqrt())?; let erf_scaled_sqrt = (0.5 * arg_scaled_sqrt.erf()?)?; let gelu_erf_grad = (0.5 + scaled_exp_arg + erf_scaled_sqrt)?; *sum_grad = sum_grad.add(&(&grad * gelu_erf_grad)?)?; } Op::Unary(arg, UnaryOp::Relu) => { let sum_grad = grads.or_insert(arg)?; let relu_grad = arg.ge(&arg.zeros_like()?)?.to_dtype(arg.dtype())?; *sum_grad = sum_grad.add(&(&grad * relu_grad)?)? } Op::Unary(arg, UnaryOp::Silu) => { let sum_grad = grads.or_insert(arg)?; // d/dx silu = sigmoid(x) * (1 + x * (1 - sigmoid(x))) = sigmoid(x) * (1 - node) + node let sigmoid_arg = (arg.neg()?.exp()? + 1.)?.recip()?; let silu_grad = &sigmoid_arg * (1. - *node) + *node; *sum_grad = sum_grad.add(&(&grad * silu_grad)?)? } Op::Elu(arg, alpha) => { // d/dx elu(x) = 1 for x > 0, alpha * e^x for x <= 0 let sum_grad = grads.or_insert(arg)?; let zeros = arg.zeros_like()?; let positive_mask = arg.gt(&zeros)?.to_dtype(arg.dtype())?; let negative_mask = arg.le(&zeros)?.to_dtype(arg.dtype())?; // node == alpha * (e^x - 1) for x <= 0, reuse it let negative_exp_mask = (negative_mask * (*node + *alpha))?; let combined_mask = (positive_mask + negative_exp_mask)?; *sum_grad = sum_grad.add(&(grad * combined_mask)?)? } Op::Powf(arg, e) => { let arg_grad = (&(grad * arg.powf(e - 1.)?)? * *e)?; let sum_grad = grads.or_insert(arg)?; *sum_grad = sum_grad.add(&arg_grad)? } Op::CustomOp1(arg, c) => { if let Some(arg_grad) = c.bwd(arg, node, &grad)? { let sum_grad = grads.or_insert(arg)?; *sum_grad = sum_grad.add(&arg_grad)? 
} } Op::CustomOp2(arg1, arg2, c) => { let (arg_grad1, arg_grad2) = c.bwd(arg1, arg2, node, &grad)?; if let Some(arg_grad1) = arg_grad1 { let sum_grad = grads.or_insert(arg1)?; *sum_grad = sum_grad.add(&arg_grad1)? } if let Some(arg_grad2) = arg_grad2 { let sum_grad = grads.or_insert(arg2)?; *sum_grad = sum_grad.add(&arg_grad2)? } } Op::CustomOp3(arg1, arg2, arg3, c) => { let (arg_grad1, arg_grad2, arg_grad3) = c.bwd(arg1, arg2, arg3, node, &grad)?; if let Some(arg_grad1) = arg_grad1 { let sum_grad = grads.or_insert(arg1)?; *sum_grad = sum_grad.add(&arg_grad1)? } if let Some(arg_grad2) = arg_grad2 { let sum_grad = grads.or_insert(arg2)?; *sum_grad = sum_grad.add(&arg_grad2)? } if let Some(arg_grad3) = arg_grad3 { let sum_grad = grads.or_insert(arg3)?; *sum_grad = sum_grad.add(&arg_grad3)? } } Op::Unary(arg, UnaryOp::Sqr) => { let arg_grad = arg.mul(&grad)?.affine(2., 0.)?; let sum_grad = grads.or_insert(arg)?; *sum_grad = sum_grad.add(&arg_grad)? } Op::Unary(arg, UnaryOp::Sqrt) => { let arg_grad = grad.div(node)?.affine(0.5, 0.)?; let sum_grad = grads.or_insert(arg)?; *sum_grad = sum_grad.add(&arg_grad)? } Op::ToDevice(arg) => { let sum_grad = grads.or_insert(arg)?; let arg_grad = grad.to_device(sum_grad.device())?; *sum_grad = sum_grad.add(&arg_grad)? } Op::Transpose(arg, dim1, dim2) => { let arg_grad = grad.transpose(*dim1, *dim2)?; let sum_grad = grads.or_insert(arg)?; *sum_grad = sum_grad.add(&arg_grad)? } Op::Permute(arg, dims) => { let mut inv_dims = vec![0; dims.len()]; for (i, &dim_idx) in dims.iter().enumerate() { inv_dims[dim_idx] = i } let arg_grad = grad.permute(inv_dims)?; let sum_grad = grads.or_insert(arg)?; *sum_grad = sum_grad.add(&arg_grad)? } }; } } Ok(grads) } } /// A store for gradients, associating a tensor id to the corresponding gradient tensor, used for back propagation. #[derive(Debug)] pub struct GradStore(HashMap<TensorId, Tensor>); impl GradStore { /// Create a new gradient store fn new() -> Self { GradStore(HashMap::new()) } /// Get the gradient tensor corresponding to the given tensor id pub fn get_id(&self, id: TensorId) -> Option<&Tensor> { self.0.get(&id) } /// Get the gradient tensor associated with the given tensor pub fn get(&self, tensor: &Tensor) -> Option<&Tensor> { self.0.get(&tensor.id()) } /// Remove the gradient tensor associated with the given tensor, returning it if it exists pub fn remove(&mut self, tensor: &Tensor) -> Option<Tensor> { self.0.remove(&tensor.id()) } /// Insert a gradient tensor associated with the given tensor, returning the previous gradient tensor if it existed pub fn insert(&mut self, tensor: &Tensor, grad: Tensor) -> Option<Tensor> { self.0.insert(tensor.id(), grad) } /// Get the gradient tensor associated with the given tensor, or, if it does not exist, /// insert a tensor of zeroes, with the same shape and type as the given tensors and return it fn or_insert(&mut self, tensor: &Tensor) -> Result<&mut Tensor> { use std::collections::hash_map::Entry; let grad = match self.0.entry(tensor.id()) { Entry::Occupied(entry) => entry.into_mut(), Entry::Vacant(entry) => { let grad = tensor.zeros_like()?; entry.insert(grad) } }; Ok(grad) } /// Get the tensor ids of the stored gradient tensors pub fn get_ids(&self) -> impl Iterator<Item = &TensorId> { self.0.keys() } }
candle/candle-core/src/backprop.rs/0
{ "file_path": "candle/candle-core/src/backprop.rs", "repo_id": "candle", "token_count": 24360 }
use crate::op::{BackpropOp, Op}; use crate::tensor::from_storage; use crate::{CpuStorage, CudaStorage, Layout, MetalStorage, Result, Shape, Tensor}; use std::sync::Arc; /// Unary ops that can be defined in user-land. pub trait CustomOp1 { // Box<dyn> does not support const yet, so use a function to get the name. fn name(&self) -> &'static str; /// The forward pass, as run on a cpu device. Note that the storage can use arbitrary strides, /// offsets etc so the associated layout should be used to access it. fn cpu_fwd(&self, storage: &CpuStorage, layout: &Layout) -> Result<(CpuStorage, Shape)>; /// The forward pass, as run on a gpu device. Note that the storage can use arbitrary strides, /// offsets etc so the associated layout should be used to access it. fn cuda_fwd(&self, _storage: &CudaStorage, _layout: &Layout) -> Result<(CudaStorage, Shape)> { Err(crate::Error::Cuda( format!("no cuda implementation for {}", self.name()).into(), )) } /// The forward pass, as run on a metal gpu device. Note that the storage can use arbitrary strides, /// offsets etc so the associated layout should be used to access it. fn metal_fwd( &self, _storage: &MetalStorage, _layout: &Layout, ) -> Result<(MetalStorage, Shape)> { Err(crate::Error::Metal( format!("no metal implementation for {}", self.name()).into(), )) } /// This function takes as argument the argument `arg` used in the forward pass, the result /// produced by the forward operation `res` and the gradient of the result `grad_res`. /// The function should return the gradient of the argument. fn bwd(&self, _arg: &Tensor, _res: &Tensor, _grad_res: &Tensor) -> Result<Option<Tensor>> { Err(crate::Error::BackwardNotSupported { op: self.name() }) } } pub trait CustomOp2 { fn name(&self) -> &'static str; /// The forward pass, as run on a cpu device. Note that the storage can use arbitrary strides, /// offsets etc so the associated layout should be used to access it. fn cpu_fwd( &self, s1: &CpuStorage, l1: &Layout, s2: &CpuStorage, l2: &Layout, ) -> Result<(CpuStorage, Shape)>; /// The forward pass, as run on a gpu device. Note that the storage can use arbitrary strides, /// offsets etc so the associated layout should be used to access it. fn cuda_fwd( &self, _: &CudaStorage, _: &Layout, _: &CudaStorage, _: &Layout, ) -> Result<(CudaStorage, Shape)> { Err(crate::Error::Cuda( format!("no cuda implementation for {}", self.name()).into(), )) } /// The forward pass, as run on a metal gpu device. Note that the storage can use arbitrary strides, /// offsets etc so the associated layout should be used to access it. fn metal_fwd( &self, _: &MetalStorage, _: &Layout, _: &MetalStorage, _: &Layout, ) -> Result<(MetalStorage, Shape)> { Err(crate::Error::Metal( format!("no metal implementation for {}", self.name()).into(), )) } fn bwd( &self, _arg1: &Tensor, _arg2: &Tensor, _res: &Tensor, _grad_res: &Tensor, ) -> Result<(Option<Tensor>, Option<Tensor>)> { Err(crate::Error::BackwardNotSupported { op: self.name() }) } } pub trait CustomOp3 { fn name(&self) -> &'static str; /// The forward pass, as run on a cpu device. Note that the storage can use arbitrary strides, /// offsets etc so the associated layout should be used to access it. fn cpu_fwd( &self, s1: &CpuStorage, l1: &Layout, s2: &CpuStorage, l2: &Layout, s3: &CpuStorage, l3: &Layout, ) -> Result<(CpuStorage, Shape)>; /// The forward pass, as run on a gpu device. Note that the storage can use arbitrary strides, /// offsets etc so the associated layout should be used to access it. 
fn cuda_fwd( &self, _: &CudaStorage, _: &Layout, _: &CudaStorage, _: &Layout, _: &CudaStorage, _: &Layout, ) -> Result<(CudaStorage, Shape)> { Err(crate::Error::Cuda( format!("no cuda implementation for {}", self.name()).into(), )) } /// The forward pass, as run on a metal gpu device. Note that the storage can use arbitrary strides, /// offsets etc so the associated layout should be used to access it. fn metal_fwd( &self, _: &MetalStorage, _: &Layout, _: &MetalStorage, _: &Layout, _: &MetalStorage, _: &Layout, ) -> Result<(MetalStorage, Shape)> { Err(crate::Error::Metal( format!("no metal implementation for {}", self.name()).into(), )) } fn bwd( &self, _arg1: &Tensor, _arg2: &Tensor, _arg3: &Tensor, _res: &Tensor, _grad_res: &Tensor, ) -> Result<(Option<Tensor>, Option<Tensor>, Option<Tensor>)> { Err(crate::Error::BackwardNotSupported { op: self.name() }) } } impl Tensor { /// Applies a unary custom op without backward support pub fn apply_op1_no_bwd<C: CustomOp1>(&self, c: &C) -> Result<Self> { let (storage, shape) = self.storage().apply_op1(self.layout(), c)?; Ok(from_storage(storage, shape, BackpropOp::none(), false)) } /// Applies a binary custom op without backward support pub fn apply_op2_no_bwd<C: CustomOp2>(&self, rhs: &Self, c: &C) -> Result<Self> { let (storage, shape) = self.storage() .apply_op2(self.layout(), &rhs.storage(), rhs.layout(), c)?; Ok(from_storage(storage, shape, BackpropOp::none(), false)) } /// Applies a ternary custom op without backward support pub fn apply_op3_no_bwd<C: CustomOp3>(&self, t2: &Self, t3: &Self, c: &C) -> Result<Self> { let (storage, shape) = self.storage().apply_op3( self.layout(), &t2.storage(), t2.layout(), &t3.storage(), t3.layout(), c, )?; Ok(from_storage(storage, shape, BackpropOp::none(), false)) } /// Applies a unary custom op. pub fn apply_op1_arc(&self, c: Arc<Box<dyn CustomOp1 + Send + Sync>>) -> Result<Self> { let (storage, shape) = self .storage() .apply_op1(self.layout(), c.as_ref().as_ref())?; let op = BackpropOp::new1(self, |s| Op::CustomOp1(s, c.clone())); Ok(from_storage(storage, shape, op, false)) } pub fn apply_op1<C: 'static + CustomOp1 + Send + Sync>(&self, c: C) -> Result<Self> { self.apply_op1_arc(Arc::new(Box::new(c))) } /// Applies a binary custom op. pub fn apply_op2_arc( &self, rhs: &Self, c: Arc<Box<dyn CustomOp2 + Send + Sync>>, ) -> Result<Self> { let (storage, shape) = self.storage().apply_op2( self.layout(), &rhs.storage(), rhs.layout(), c.as_ref().as_ref(), )?; let op = BackpropOp::new2(self, rhs, |t1, t2| Op::CustomOp2(t1, t2, c.clone())); Ok(from_storage(storage, shape, op, false)) } pub fn apply_op2<C: 'static + CustomOp2 + Send + Sync>(&self, r: &Self, c: C) -> Result<Self> { self.apply_op2_arc(r, Arc::new(Box::new(c))) } /// Applies a ternary custom op. pub fn apply_op3_arc( &self, t2: &Self, t3: &Self, c: Arc<Box<dyn CustomOp3 + Send + Sync>>, ) -> Result<Self> { let (storage, shape) = self.storage().apply_op3( self.layout(), &t2.storage(), t2.layout(), &t3.storage(), t3.layout(), c.as_ref().as_ref(), )?; let op = BackpropOp::new3(self, t2, t3, |t1, t2, t3| { Op::CustomOp3(t1, t2, t3, c.clone()) }); Ok(from_storage(storage, shape, op, false)) } pub fn apply_op3<C: 'static + CustomOp3 + Send + Sync>( &self, t2: &Self, t3: &Self, c: C, ) -> Result<Self> { self.apply_op3_arc(t2, t3, Arc::new(Box::new(c))) } } // In place ops. /// Unary ops that can be defined in user-land. /// These ops work in place and as such back-prop is unsupported. 
pub trait InplaceOp1 { // Box<dyn> does not support const yet, so use a function to get the name. fn name(&self) -> &'static str; /// The forward pass, as run on a cpu device. Note that the storage can use arbitrary strides, /// offsets etc so the associated layout should be used to access it. fn cpu_fwd(&self, storage: &mut CpuStorage, layout: &Layout) -> Result<()>; /// The forward pass, as run on a gpu device. Note that the storage can use arbitrary strides, /// offsets etc so the associated layout should be used to access it. fn cuda_fwd(&self, _storage: &mut CudaStorage, _layout: &Layout) -> Result<()> { Err(crate::Error::Cuda( format!("no cuda implementation for {}", self.name()).into(), )) } /// The forward pass, as run on a metal gpu device. Note that the storage can use arbitrary strides, /// offsets etc so the associated layout should be used to access it. fn metal_fwd(&self, _storage: &mut MetalStorage, _layout: &Layout) -> Result<()> { Err(crate::Error::Metal( format!("no metal implementation for {}", self.name()).into(), )) } } pub trait InplaceOp2 { fn name(&self) -> &'static str; /// The forward pass, as run on a cpu device. Note that the storage can use arbitrary strides, /// offsets etc so the associated layout should be used to access it. fn cpu_fwd(&self, s1: &mut CpuStorage, l1: &Layout, s2: &CpuStorage, l2: &Layout) -> Result<()>; /// The forward pass, as run on a gpu device. Note that the storage can use arbitrary strides, /// offsets etc so the associated layout should be used to access it. fn cuda_fwd(&self, _: &mut CudaStorage, _: &Layout, _: &CudaStorage, _: &Layout) -> Result<()> { Err(crate::Error::Cuda( format!("no cuda implementation for {}", self.name()).into(), )) } /// The forward pass, as run on a metal gpu device. Note that the storage can use arbitrary strides, /// offsets etc so the associated layout should be used to access it. fn metal_fwd( &self, _: &mut MetalStorage, _: &Layout, _: &MetalStorage, _: &Layout, ) -> Result<()> { Err(crate::Error::Metal( format!("no metal implementation for {}", self.name()).into(), )) } } pub trait InplaceOp3 { fn name(&self) -> &'static str; /// The forward pass, as run on a cpu device. Note that the storage can use arbitrary strides, /// offsets etc so the associated layout should be used to access it. fn cpu_fwd( &self, s1: &mut CpuStorage, l1: &Layout, s2: &CpuStorage, l2: &Layout, s3: &CpuStorage, l3: &Layout, ) -> Result<()>; /// The forward pass, as run on a gpu device. Note that the storage can use arbitrary strides, /// offsets etc so the associated layout should be used to access it. fn cuda_fwd( &self, _: &mut CudaStorage, _: &Layout, _: &CudaStorage, _: &Layout, _: &CudaStorage, _: &Layout, ) -> Result<()> { Err(crate::Error::Cuda( format!("no cuda implementation for {}", self.name()).into(), )) } /// The forward pass, as run on a metal gpu device. Note that the storage can use arbitrary strides, /// offsets etc so the associated layout should be used to access it. fn metal_fwd( &self, _: &mut MetalStorage, _: &Layout, _: &MetalStorage, _: &Layout, _: &MetalStorage, _: &Layout, ) -> Result<()> { Err(crate::Error::Metal( format!("no metal implementation for {}", self.name()).into(), )) } } impl Tensor { /// Applies a unary custom op in place. pub fn inplace_op1<C: InplaceOp1>(&self, c: &C) -> Result<()> { self.storage_mut().inplace_op1(self.layout(), c) } /// Applies a unary custom op in place (for the first tensor). 
pub fn inplace_op2<C: InplaceOp2>(&self, rhs: &Self, c: &C) -> Result<()> { self.storage_mut() .inplace_op2(self.layout(), &rhs.storage(), rhs.layout(), c) } /// Applies a ternary custom op in place (for the first tensor). pub fn inplace_op3<C: InplaceOp3>(&self, t2: &Self, t3: &Self, c: &C) -> Result<()> { self.storage_mut().inplace_op3( self.layout(), &t2.storage(), t2.layout(), &t3.storage(), t3.layout(), c, ) } } pub struct UgIOp1 { name: &'static str, #[cfg(feature = "cuda")] func: cudarc::driver::CudaFunction, #[cfg(feature = "metal")] func: metal::ComputePipelineState, } impl UgIOp1 { #[allow(unused)] #[cfg(not(target_arch = "wasm32"))] pub fn new( name: &'static str, kernel: ug::lang::ssa::Kernel, device: &crate::Device, ) -> Result<Self> { #[cfg(feature = "cuda")] { let device = device.as_cuda_device()?; let func = device.compile(name, kernel)?; Ok(Self { name, func }) } #[cfg(feature = "metal")] { let device = device.as_metal_device()?; let func = device.compile(name, kernel)?; Ok(Self { name, func }) } #[cfg(not(any(feature = "cuda", feature = "metal")))] { Ok(Self { name }) } } } impl InplaceOp1 for UgIOp1 { fn name(&self) -> &'static str { self.name } fn cpu_fwd(&self, _: &mut CpuStorage, _: &Layout) -> Result<()> { crate::bail!("ug ops are only supported on metal/cuda at the moment") } #[cfg(feature = "metal")] fn metal_fwd(&self, sto: &mut MetalStorage, layout: &Layout) -> Result<()> { use crate::backend::BackendStorage; use candle_metal_kernels::utils::EncoderProvider; let elem_count = layout.shape().elem_count(); if sto.dtype() != crate::DType::F32 { // TODO: support more dtypes. crate::bail!("input is not a f32 tensor") } let device = sto.device(); println!("here"); let command_buffer = device.command_buffer()?; let command_buffer = &command_buffer; let encoder = command_buffer.encoder(); let encoder = encoder.as_ref(); encoder.set_compute_pipeline_state(&self.func); let (g, b) = if elem_count % 32 == 0 { (elem_count / 32, 32) } else { (elem_count, 1) }; let grid_dims = metal::MTLSize { width: g as u64, height: 1, depth: 1, }; let group_dims = candle_metal_kernels::utils::get_block_dims(b as u64, 1, 1); candle_metal_kernels::utils::set_param(encoder, 0, (sto.buffer(), 0usize)); encoder.use_resource(sto.buffer(), metal::MTLResourceUsage::Write); encoder.dispatch_threads(grid_dims, group_dims); Ok(()) } #[cfg(feature = "cuda")] fn cuda_fwd(&self, sto: &mut CudaStorage, layout: &Layout) -> Result<()> { use crate::cuda_backend::WrapErr; use cudarc::driver::LaunchAsync; let elem_count = layout.shape().elem_count(); // TODO: support more dtypes. let sto = sto.as_cuda_slice::<f32>()?; let sto = match layout.contiguous_offsets() { None => crate::bail!("input has to be contiguous"), Some((o1, o2)) => sto.slice(o1..o2), }; let params = (&sto,); let (g, b) = if elem_count % 32 == 0 { (elem_count / 32, 32) } else { (elem_count, 1) }; let cfg = cudarc::driver::LaunchConfig { grid_dim: (g as u32, 1, 1), block_dim: (b as u32, 1, 1), shared_mem_bytes: 0, }; unsafe { self.func.clone().launch(cfg, params) }.w()?; Ok(()) } }
candle/candle-core/src/custom_op.rs/0
{ "file_path": "candle/candle-core/src/custom_op.rs", "repo_id": "candle", "token_count": 7370 }
use super::k_quants::{ BlockQ2K, BlockQ3K, BlockQ4K, BlockQ4_0, BlockQ5K, BlockQ6K, BlockQ8K, BlockQ8_0, QK8_0, QK_K, }; use crate::Result; use byteorder::{ByteOrder, LittleEndian}; use half::f16; #[cfg(target_arch = "x86")] use core::arch::x86::*; #[cfg(target_arch = "x86_64")] use core::arch::x86_64::*; #[inline(always)] pub(crate) unsafe fn sum_i16_pairs_float(x: __m256i) -> __m256 { let ones = _mm256_set1_epi16(1); let summed_pairs = _mm256_madd_epi16(ones, x); _mm256_cvtepi32_ps(summed_pairs) } #[inline(always)] pub(crate) unsafe fn mul_sum_us8_pairs_float(ax: __m256i, sy: __m256i) -> __m256 { let dot = _mm256_maddubs_epi16(ax, sy); sum_i16_pairs_float(dot) } #[inline(always)] pub(crate) unsafe fn hsum_float_8(x: __m256) -> f32 { let res = _mm256_extractf128_ps(x, 1); let res = _mm_add_ps(res, _mm256_castps256_ps128(x)); let res = _mm_add_ps(res, _mm_movehl_ps(res, res)); let res = _mm_add_ss(res, _mm_movehdup_ps(res)); _mm_cvtss_f32(res) } #[inline(always)] pub(crate) unsafe fn bytes_from_nibbles_32(rsi: *const u8) -> __m256i { let tmp = _mm_loadu_si128(rsi as *const __m128i); let bytes = _mm256_insertf128_si256::<1>(_mm256_castsi128_si256(tmp), _mm_srli_epi16(tmp, 4)); let low_mask = _mm256_set1_epi8(0xF); _mm256_and_si256(low_mask, bytes) } #[inline(always)] pub(crate) unsafe fn mul_sum_i8_pairs_float(x: __m256i, y: __m256i) -> __m256 { let ax = _mm256_sign_epi8(x, x); let sy = _mm256_sign_epi8(y, x); mul_sum_us8_pairs_float(ax, sy) } #[inline(always)] pub(crate) fn vec_dot_q4_0_q8_0(n: usize, xs: &[BlockQ4_0], ys: &[BlockQ8_0]) -> Result<f32> { let qk = QK8_0; if n % QK8_0 != 0 { crate::bail!("vec_dot_q4_0_q8_0: {n} is not divisible by {qk}") } unsafe { let mut acc = _mm256_setzero_ps(); for (x, y) in xs.iter().zip(ys.iter()) { let d = _mm256_set1_ps(f16::to_f32(x.d) * f16::to_f32(y.d)); let bx = bytes_from_nibbles_32(x.qs.as_ptr()); let off = _mm256_set1_epi8(8); let bx = _mm256_sub_epi8(bx, off); let by = _mm256_loadu_si256(y.qs.as_ptr() as *const __m256i); let q = mul_sum_i8_pairs_float(bx, by); acc = _mm256_fmadd_ps(d, q, acc); } Ok(hsum_float_8(acc)) } } #[inline(always)] pub(crate) fn vec_dot_q8_0_q8_0(n: usize, xs: &[BlockQ8_0], ys: &[BlockQ8_0]) -> Result<f32> { let qk = QK8_0; if n % QK8_0 != 0 { crate::bail!("vec_dot_q8_0_q8_0: {n} is not divisible by {qk}") } unsafe { let mut acc = _mm256_setzero_ps(); for (x, y) in xs.iter().zip(ys.iter()) { let d = _mm256_set1_ps(f16::to_f32(x.d) * f16::to_f32(y.d)); let bx = _mm256_loadu_si256(x.qs.as_ptr() as *const __m256i); let by = _mm256_loadu_si256(y.qs.as_ptr() as *const __m256i); let q = mul_sum_i8_pairs_float(bx, by); acc = _mm256_fmadd_ps(d, q, acc); } Ok(hsum_float_8(acc)) } } #[inline(always)] unsafe fn get_scale_shuffle(i: usize) -> __m128i { const K_SHUFFLE: [u8; 128] = [ 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, ]; _mm_loadu_si128((K_SHUFFLE.as_ptr() as *const __m128i).add(i)) } #[inline(always)] unsafe fn get_scale_shuffle_k4(i: usize) -> __m256i { const K_SHUFFLE: [u8; 256] = [ 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 
3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 10, 11, 10, 11, 10, 11, 10, 11, 10, 11, 10, 11, 10, 11, 10, 11, 10, 11, 10, 11, 10, 11, 10, 11, 10, 11, 10, 11, 10, 11, 10, 11, 12, 13, 12, 13, 12, 13, 12, 13, 12, 13, 12, 13, 12, 13, 12, 13, 12, 13, 12, 13, 12, 13, 12, 13, 12, 13, 12, 13, 12, 13, 12, 13, 14, 15, 14, 15, 14, 15, 14, 15, 14, 15, 14, 15, 14, 15, 14, 15, 14, 15, 14, 15, 14, 15, 14, 15, 14, 15, 14, 15, 14, 15, 14, 15, ]; _mm256_loadu_si256((K_SHUFFLE.as_ptr() as *const __m256i).add(i)) } #[inline(always)] unsafe fn get_scale_shuffle_q3k(i: usize) -> __m256i { const K_SHUFFLE: [u8; 128] = [ 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 10, 11, 10, 11, 10, 11, 10, 11, 10, 11, 10, 11, 10, 11, 10, 11, 12, 13, 12, 13, 12, 13, 12, 13, 12, 13, 12, 13, 12, 13, 12, 13, 14, 15, 14, 15, 14, 15, 14, 15, 14, 15, 14, 15, 14, 15, 14, 15, ]; _mm256_loadu_si256((K_SHUFFLE.as_ptr() as *const __m256i).add(i)) } #[inline(always)] pub(crate) fn vec_dot_q6k_q8k(n: usize, xs: &[BlockQ6K], ys: &[BlockQ8K]) -> Result<f32> { let qk = QK_K; if n % qk != 0 { crate::bail!("vec_dot_q6k_8k: {n} is not divisible by {qk}") } unsafe { let m4 = _mm256_set1_epi8(0xF); let m2 = _mm256_set1_epi8(3); let m32s = _mm256_set1_epi8(32); let mut acc = _mm256_setzero_ps(); for (x, y) in xs.iter().zip(ys.iter()) { let d = y.d * x.d.to_f32(); let mut q4 = x.ql.as_ptr(); let mut qh = x.qh.as_ptr(); let mut q8 = y.qs.as_ptr(); let scales = _mm_loadu_si128(x.scales.as_ptr() as *const __m128i); let mut sumi = _mm256_setzero_si256(); for j in 0..QK_K / 128 { let is = j * 4; let scale_0 = _mm_shuffle_epi8(scales, get_scale_shuffle(is)); let scale_1 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 1)); let scale_2 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 2)); let scale_3 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 3)); let q4bits1 = _mm256_loadu_si256(q4 as *const __m256i); q4 = q4.add(32); let q4bits2 = _mm256_loadu_si256(q4 as *const __m256i); q4 = q4.add(32); let q4bits_h = _mm256_loadu_si256(qh as *const __m256i); qh = qh.add(32); let q4h_0 = _mm256_slli_epi16(_mm256_and_si256(q4bits_h, m2), 4); let q4h_1 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q4bits_h, 2), m2), 4); let q4h_2 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q4bits_h, 4), m2), 4); let q4h_3 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q4bits_h, 6), m2), 4); let q4_0 = _mm256_or_si256(_mm256_and_si256(q4bits1, m4), q4h_0); let q4_1 = _mm256_or_si256(_mm256_and_si256(q4bits2, m4), q4h_1); let q4_2 = _mm256_or_si256(_mm256_and_si256(_mm256_srli_epi16(q4bits1, 4), m4), q4h_2); let q4_3 = _mm256_or_si256(_mm256_and_si256(_mm256_srli_epi16(q4bits2, 4), m4), q4h_3); let q8_0 = _mm256_loadu_si256(q8 as *const __m256i); q8 = q8.add(32); let q8_1 = _mm256_loadu_si256(q8 as *const __m256i); q8 = q8.add(32); let q8_2 = _mm256_loadu_si256(q8 as *const __m256i); q8 = q8.add(32); let q8_3 = _mm256_loadu_si256(q8 as *const __m256i); q8 = q8.add(32); let q8s_0 = _mm256_maddubs_epi16(m32s, q8_0); let q8s_1 = _mm256_maddubs_epi16(m32s, q8_1); let q8s_2 = 
_mm256_maddubs_epi16(m32s, q8_2); let q8s_3 = _mm256_maddubs_epi16(m32s, q8_3); let p16_0 = _mm256_maddubs_epi16(q4_0, q8_0); let p16_1 = _mm256_maddubs_epi16(q4_1, q8_1); let p16_2 = _mm256_maddubs_epi16(q4_2, q8_2); let p16_3 = _mm256_maddubs_epi16(q4_3, q8_3); let p16_0 = _mm256_sub_epi16(p16_0, q8s_0); let p16_1 = _mm256_sub_epi16(p16_1, q8s_1); let p16_2 = _mm256_sub_epi16(p16_2, q8s_2); let p16_3 = _mm256_sub_epi16(p16_3, q8s_3); let p16_0 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_0), p16_0); let p16_1 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_1), p16_1); let p16_2 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_2), p16_2); let p16_3 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_3), p16_3); sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_1)); sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_2, p16_3)); } acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc); } Ok(hsum_float_8(acc)) } } #[inline(always)] unsafe fn mm256_set_m128i(a: __m128i, b: __m128i) -> __m256i { _mm256_insertf128_si256(_mm256_castsi128_si256(b), a, 1) } #[inline(always)] pub(crate) fn vec_dot_q2k_q8k(n: usize, xs: &[BlockQ2K], ys: &[BlockQ8K]) -> Result<f32> { if n % QK_K != 0 { crate::bail!("vec_dot_q2k_q8k: {n} is not divisible by {QK_K}") } unsafe { let m3 = _mm256_set1_epi8(3); let m4 = _mm_set1_epi8(0xF); let mut acc = _mm256_setzero_ps(); for (x, y) in xs.iter().zip(ys.iter()) { let d = y.d * x.d.to_f32(); let dmin = -y.d * x.dmin.to_f32(); let mut q2 = x.qs.as_ptr(); let mut q8 = y.qs.as_ptr(); let mins_and_scales = _mm_loadu_si128(x.scales.as_ptr() as *const __m128i); let scales8 = _mm_and_si128(mins_and_scales, m4); let mins8 = _mm_and_si128(_mm_srli_epi16(mins_and_scales, 4), m4); let mins = _mm256_cvtepi8_epi16(mins8); let prod = _mm256_madd_epi16(mins, _mm256_loadu_si256(y.bsums.as_ptr() as *const __m256i)); acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&dmin), _mm256_cvtepi32_ps(prod), acc); let all_scales = _mm256_cvtepi8_epi16(scales8); let l_scales = _mm256_extracti128_si256(all_scales, 0); let h_scales = _mm256_extracti128_si256(all_scales, 1); let scales = [ mm256_set_m128i(l_scales, l_scales), mm256_set_m128i(h_scales, h_scales), ]; let mut sumi = _mm256_setzero_si256(); for scale in scales { let q2bits = _mm256_loadu_si256(q2 as *const __m256i); q2 = q2.add(32); let q8_0 = _mm256_loadu_si256(q8 as *const __m256i); q8 = q8.add(32); let q8_1 = _mm256_loadu_si256(q8 as *const __m256i); q8 = q8.add(32); let q8_2 = _mm256_loadu_si256(q8 as *const __m256i); q8 = q8.add(32); let q8_3 = _mm256_loadu_si256(q8 as *const __m256i); q8 = q8.add(32); let q2_0 = _mm256_and_si256(q2bits, m3); let q2_1 = _mm256_and_si256(_mm256_srli_epi16(q2bits, 2), m3); let q2_2 = _mm256_and_si256(_mm256_srli_epi16(q2bits, 4), m3); let q2_3 = _mm256_and_si256(_mm256_srli_epi16(q2bits, 6), m3); let p0 = _mm256_maddubs_epi16(q2_0, q8_0); let p1 = _mm256_maddubs_epi16(q2_1, q8_1); let p2 = _mm256_maddubs_epi16(q2_2, q8_2); let p3 = _mm256_maddubs_epi16(q2_3, q8_3); let p0 = _mm256_madd_epi16(_mm256_shuffle_epi8(scale, get_scale_shuffle_q3k(0)), p0); let p1 = _mm256_madd_epi16(_mm256_shuffle_epi8(scale, get_scale_shuffle_q3k(1)), p1); let p2 = _mm256_madd_epi16(_mm256_shuffle_epi8(scale, get_scale_shuffle_q3k(2)), p2); let p3 = _mm256_madd_epi16(_mm256_shuffle_epi8(scale, get_scale_shuffle_q3k(3)), p3); let p0 = _mm256_add_epi32(p0, p1); let p2 = _mm256_add_epi32(p2, p3); sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p0, p2)); } acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), 
_mm256_cvtepi32_ps(sumi), acc); } Ok(hsum_float_8(acc)) } } #[inline(always)] pub(crate) fn vec_dot_q3k_q8k(n: usize, xs: &[BlockQ3K], ys: &[BlockQ8K]) -> Result<f32> { if n % QK_K != 0 { crate::bail!("vec_dot_q3k_q8k: {n} is not divisible by {QK_K}") } const KMASK1: u32 = 0x03030303; const KMASK2: u32 = 0x0f0f0f0f; let mut aux = [0u32; 3]; unsafe { let m3 = _mm256_set1_epi8(3); let mone = _mm256_set1_epi8(1); let m32 = _mm_set1_epi8(32); let mut acc = _mm256_setzero_ps(); for (x, y) in xs.iter().zip(ys.iter()) { let d = y.d * x.d.to_f32(); let mut q3 = x.qs.as_ptr(); let mut q8 = y.qs.as_ptr(); LittleEndian::read_u32_into(&x.scales, &mut aux); let scales128 = _mm_set_epi32( (((aux[1] >> 4) & KMASK2) | (((aux[2] >> 6) & KMASK1) << 4)) as i32, (((aux[0] >> 4) & KMASK2) | (((aux[2] >> 4) & KMASK1) << 4)) as i32, ((aux[1] & KMASK2) | (((aux[2] >> 2) & KMASK1) << 4)) as i32, ((aux[0] & KMASK2) | (((aux[2]) & KMASK1) << 4)) as i32, ); let scales128 = _mm_sub_epi8(scales128, m32); let all_scales = _mm256_cvtepi8_epi16(scales128); let l_scales = _mm256_extracti128_si256(all_scales, 0); let h_scales = _mm256_extracti128_si256(all_scales, 1); let scales = [ mm256_set_m128i(l_scales, l_scales), mm256_set_m128i(h_scales, h_scales), ]; // high bit let hbits = _mm256_loadu_si256(x.hmask.as_ptr() as *const __m256i); let mut sumi = _mm256_setzero_si256(); for (j, scale) in scales.iter().enumerate() { // load low 2 bits let q3bits = _mm256_loadu_si256(q3 as *const __m256i); q3 = q3.add(32); // Prepare low and high bits // We hardcode the shifts here to avoid loading them into a separate register let q3l_0 = _mm256_and_si256(q3bits, m3); let q3h_0 = if j == 0 { _mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, 0)), 0) } else { _mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, 4)), 4) }; let q3h_0 = _mm256_slli_epi16(q3h_0, 2); let q3l_1 = _mm256_and_si256(_mm256_srli_epi16(q3bits, 2), m3); let q3h_1 = if j == 0 { _mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, 1)), 1) } else { _mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, 5)), 5) }; let q3h_1 = _mm256_slli_epi16(q3h_1, 2); let q3l_2 = _mm256_and_si256(_mm256_srli_epi16(q3bits, 4), m3); let q3h_2 = if j == 0 { _mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, 2)), 2) } else { _mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, 6)), 6) }; let q3h_2 = _mm256_slli_epi16(q3h_2, 2); let q3l_3 = _mm256_and_si256(_mm256_srli_epi16(q3bits, 6), m3); let q3h_3 = if j == 0 { _mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, 3)), 3) } else { _mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, 7)), 7) }; let q3h_3 = _mm256_slli_epi16(q3h_3, 2); // load Q8 quants let q8_0 = _mm256_loadu_si256(q8 as *const __m256i); q8 = q8.add(32); let q8_1 = _mm256_loadu_si256(q8 as *const __m256i); q8 = q8.add(32); let q8_2 = _mm256_loadu_si256(q8 as *const __m256i); q8 = q8.add(32); let q8_3 = _mm256_loadu_si256(q8 as *const __m256i); q8 = q8.add(32); // Dot product: we multiply the 2 low bits and 1 high bit part separately, so we // can use _mm256_maddubs_epi16, and then subtract. 
The high bit part has the 2 // already subtracted (and so, it is zero if the high bit was not set, and 2 if the // high bit was set) let q8s_0 = _mm256_maddubs_epi16(q3h_0, q8_0); let q8s_1 = _mm256_maddubs_epi16(q3h_1, q8_1); let q8s_2 = _mm256_maddubs_epi16(q3h_2, q8_2); let q8s_3 = _mm256_maddubs_epi16(q3h_3, q8_3); let p16_0 = _mm256_maddubs_epi16(q3l_0, q8_0); let p16_1 = _mm256_maddubs_epi16(q3l_1, q8_1); let p16_2 = _mm256_maddubs_epi16(q3l_2, q8_2); let p16_3 = _mm256_maddubs_epi16(q3l_3, q8_3); let p16_0 = _mm256_sub_epi16(p16_0, q8s_0); let p16_1 = _mm256_sub_epi16(p16_1, q8s_1); let p16_2 = _mm256_sub_epi16(p16_2, q8s_2); let p16_3 = _mm256_sub_epi16(p16_3, q8s_3); // multiply with scales let p16_0 = _mm256_madd_epi16(_mm256_shuffle_epi8(*scale, get_scale_shuffle_q3k(0)), p16_0); let p16_1 = _mm256_madd_epi16(_mm256_shuffle_epi8(*scale, get_scale_shuffle_q3k(1)), p16_1); let p16_2 = _mm256_madd_epi16(_mm256_shuffle_epi8(*scale, get_scale_shuffle_q3k(2)), p16_2); let p16_3 = _mm256_madd_epi16(_mm256_shuffle_epi8(*scale, get_scale_shuffle_q3k(3)), p16_3); // accumulate let p16_0 = _mm256_add_epi32(p16_0, p16_1); let p16_2 = _mm256_add_epi32(p16_2, p16_3); sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_2)); } // multiply with block scale and accumulate acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc); } Ok(hsum_float_8(acc)) } } #[inline(always)] pub(crate) fn vec_dot_q4k_q8k(n: usize, xs: &[BlockQ4K], ys: &[BlockQ8K]) -> Result<f32> { if n % QK_K != 0 { crate::bail!("vec_dot_q4k_q8k: {n} is not divisible by {QK_K}") } let mut utmp = [0u32; 4]; const KMASK1: u32 = 0x3f3f3f3f; const KMASK2: u32 = 0x0f0f0f0f; const KMASK3: u32 = 0x03030303; unsafe { let m4 = _mm256_set1_epi8(0xF); let mut acc = _mm256_setzero_ps(); let mut acc_m = _mm_setzero_ps(); for (x, y) in xs.iter().zip(ys.iter()) { let d = y.d * x.d.to_f32(); let dmin = -y.d * x.dmin.to_f32(); LittleEndian::read_u32_into(&x.scales, &mut utmp[0..3]); utmp[3] = ((utmp[2] >> 4) & KMASK2) | (((utmp[1] >> 6) & KMASK3) << 4); let uaux = utmp[1] & KMASK1; utmp[1] = (utmp[2] & KMASK2) | (((utmp[0] >> 6) & KMASK3) << 4); utmp[2] = uaux; utmp[0] &= KMASK1; let mut q4 = x.qs.as_ptr(); let mut q8 = y.qs.as_ptr(); let mins_and_scales = _mm256_cvtepu8_epi16(_mm_set_epi32( utmp[3] as i32, utmp[2] as i32, utmp[1] as i32, utmp[0] as i32, )); let q8sums = _mm256_loadu_si256(y.bsums.as_ptr() as *const __m256i); let q8s = _mm_hadd_epi16( _mm256_extracti128_si256(q8sums, 0), _mm256_extracti128_si256(q8sums, 1), ); let prod = _mm_madd_epi16(_mm256_extracti128_si256(mins_and_scales, 1), q8s); acc_m = _mm_fmadd_ps(_mm_set1_ps(dmin), _mm_cvtepi32_ps(prod), acc_m); let sc128 = _mm256_extracti128_si256(mins_and_scales, 0); let scales = mm256_set_m128i(sc128, sc128); let mut sumi = _mm256_setzero_si256(); for j in 0..QK_K / 64 { let scale_l = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2 * j)); let scale_h = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2 * j + 1)); let q4bits = _mm256_loadu_si256(q4 as *const __m256i); q4 = q4.add(32); let q4l = _mm256_and_si256(q4bits, m4); let q4h = _mm256_and_si256(_mm256_srli_epi16(q4bits, 4), m4); let q8l = _mm256_loadu_si256(q8 as *const __m256i); q8 = q8.add(32); let p16l = _mm256_maddubs_epi16(q4l, q8l); let p16l = _mm256_madd_epi16(scale_l, p16l); sumi = _mm256_add_epi32(sumi, p16l); let q8h = _mm256_loadu_si256(q8 as *const __m256i); q8 = q8.add(32); let p16h = _mm256_maddubs_epi16(q4h, q8h); let p16h = _mm256_madd_epi16(scale_h, p16h); sumi = 
_mm256_add_epi32(sumi, p16h); } let vd = _mm256_set1_ps(d); acc = _mm256_fmadd_ps(vd, _mm256_cvtepi32_ps(sumi), acc); } let acc_m = _mm_add_ps(acc_m, _mm_movehl_ps(acc_m, acc_m)); let acc_m = _mm_add_ss(acc_m, _mm_movehdup_ps(acc_m)); Ok(hsum_float_8(acc) + _mm_cvtss_f32(acc_m)) } } #[inline(always)] pub(crate) fn vec_dot_q5k_q8k(n: usize, xs: &[BlockQ5K], ys: &[BlockQ8K]) -> Result<f32> { if n % QK_K != 0 { crate::bail!("vec_dot_q5k_q8k: {n} is not divisible by {QK_K}") } let mut utmp = [0u32; 4]; const KMASK1: u32 = 0x3f3f3f3f; const KMASK2: u32 = 0x0f0f0f0f; const KMASK3: u32 = 0x03030303; unsafe { let m4 = _mm256_set1_epi8(0xF); let mzero = _mm_setzero_si128(); let mone = _mm256_set1_epi8(1); let mut acc = _mm256_setzero_ps(); let mut summs = 0.0; for (x, y) in xs.iter().zip(ys.iter()) { let d = y.d * x.d.to_f32(); let dmin = -y.d * x.dmin.to_f32(); LittleEndian::read_u32_into(&x.scales, &mut utmp[0..3]); utmp[3] = ((utmp[2] >> 4) & KMASK2) | (((utmp[1] >> 6) & KMASK3) << 4); let uaux = utmp[1] & KMASK1; utmp[1] = (utmp[2] & KMASK2) | (((utmp[0] >> 6) & KMASK3) << 4); utmp[2] = uaux; utmp[0] &= KMASK1; let mut q5 = x.qs.as_ptr(); let mut q8 = y.qs.as_ptr(); let mins_and_scales = _mm256_cvtepu8_epi16(_mm_set_epi32( utmp[3] as i32, utmp[2] as i32, utmp[1] as i32, utmp[0] as i32, )); let q8sums = _mm256_loadu_si256(y.bsums.as_ptr() as *const __m256i); let q8s = _mm_hadd_epi16( _mm256_extracti128_si256(q8sums, 0), _mm256_extracti128_si256(q8sums, 1), ); let prod = _mm_madd_epi16(_mm256_extracti128_si256(mins_and_scales, 1), q8s); let hsum = _mm_hadd_epi32(_mm_hadd_epi32(prod, mzero), mzero); summs += dmin * _mm_extract_epi32(hsum, 0) as f32; let sc128 = _mm256_extracti128_si256(mins_and_scales, 0); let scales = mm256_set_m128i(sc128, sc128); let hbits = _mm256_loadu_si256(x.qh.as_ptr() as *const __m256i); let mut hmask = mone; let mut sumi = _mm256_setzero_si256(); for j in 0..QK_K / 64 { let scale_0 = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2 * j)); let scale_1 = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2 * j + 1)); let q5bits = _mm256_loadu_si256(q5 as *const __m256i); q5 = q5.add(32); //Similar to q3k we hardcode the shifts here to avoid loading them into a separate register let q5l_0 = _mm256_and_si256(q5bits, m4); let q5l_0_shift_input = _mm256_and_si256(hbits, hmask); let q5l_0_right_shift = match j { 0 => _mm256_srli_epi16(q5l_0_shift_input, 0), 1 => _mm256_srli_epi16(q5l_0_shift_input, 2), 2 => _mm256_srli_epi16(q5l_0_shift_input, 4), 3 => _mm256_srli_epi16(q5l_0_shift_input, 6), _ => unreachable!(), }; let q5h_0 = _mm256_slli_epi16(q5l_0_right_shift, 4); let q5_0 = _mm256_add_epi8(q5l_0, q5h_0); hmask = _mm256_slli_epi16(hmask, 1); let q5l_1 = _mm256_and_si256(_mm256_srli_epi16(q5bits, 4), m4); let q5l_1_shift_input = _mm256_and_si256(hbits, hmask); let q5l_1_right_shift = match j { 0 => _mm256_srli_epi16(q5l_1_shift_input, 1), 1 => _mm256_srli_epi16(q5l_1_shift_input, 3), 2 => _mm256_srli_epi16(q5l_1_shift_input, 5), 3 => _mm256_srli_epi16(q5l_1_shift_input, 7), _ => unreachable!(), }; let q5h_1 = _mm256_slli_epi16(q5l_1_right_shift, 4); let q5_1 = _mm256_add_epi8(q5l_1, q5h_1); hmask = _mm256_slli_epi16(hmask, 1); let q8_0 = _mm256_loadu_si256(q8 as *const __m256i); q8 = q8.add(32); let q8_1 = _mm256_loadu_si256(q8 as *const __m256i); q8 = q8.add(32); let p16_0 = _mm256_maddubs_epi16(q5_0, q8_0); let p16_1 = _mm256_maddubs_epi16(q5_1, q8_1); let p16_0 = _mm256_madd_epi16(scale_0, p16_0); let p16_1 = _mm256_madd_epi16(scale_1, p16_1); sumi = 
_mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_1)); } let vd = _mm256_set1_ps(d); acc = _mm256_fmadd_ps(vd, _mm256_cvtepi32_ps(sumi), acc); } Ok(hsum_float_8(acc) + summs) } } #[inline(always)] pub(crate) fn vec_dot_q8k_q8k(n: usize, xs: &[BlockQ8K], ys: &[BlockQ8K]) -> Result<f32> { let qk = QK_K; if n % qk != 0 { crate::bail!("vec_dot_q8k_8k: {n} is not divisible by {qk}") } unsafe { let mut acc = _mm256_setzero_ps(); for (xs, ys) in xs.iter().zip(ys.iter()) { let mut sumi = _mm256_setzero_si256(); let x_qs = xs.qs.as_ptr(); let y_qs = ys.qs.as_ptr(); for j in (0..QK_K).step_by(32) { let xs = _mm256_loadu_si256(x_qs.add(j) as *const __m256i); let ys = _mm256_loadu_si256(y_qs.add(j) as *const __m256i); let xs0 = _mm256_cvtepi8_epi16(_mm256_extracti128_si256(xs, 0)); let ys0 = _mm256_cvtepi8_epi16(_mm256_extracti128_si256(ys, 0)); sumi = _mm256_add_epi32(sumi, _mm256_madd_epi16(xs0, ys0)); let xs1 = _mm256_cvtepi8_epi16(_mm256_extracti128_si256(xs, 1)); let ys1 = _mm256_cvtepi8_epi16(_mm256_extracti128_si256(ys, 1)); sumi = _mm256_add_epi32(sumi, _mm256_madd_epi16(xs1, ys1)); } let d = _mm256_set1_ps(xs.d * ys.d); acc = _mm256_fmadd_ps(d, _mm256_cvtepi32_ps(sumi), acc); } Ok(hsum_float_8(acc)) } }
candle/candle-core/src/quantized/avx.rs/0
{ "file_path": "candle/candle-core/src/quantized/avx.rs", "repo_id": "candle", "token_count": 17495 }
use crate::backend::BackendStorage; use crate::op::{self, CmpOp, ReduceOp}; use crate::{CpuStorage, CudaStorage, DType, Device, Error, Layout, MetalStorage, Result, Shape}; use crate::{CustomOp1, CustomOp2, CustomOp3, InplaceOp1, InplaceOp2, InplaceOp3}; // We do not want to implement Clone on Storage as cloning may fail because of // out of memory. Instead try_clone should be used. #[derive(Debug)] pub enum Storage { Cpu(CpuStorage), Cuda(CudaStorage), Metal(MetalStorage), } impl Storage { pub fn try_clone(&self, layout: &Layout) -> Result<Self> { match self { Self::Cpu(storage) => Ok(Self::Cpu(storage.clone())), Self::Cuda(storage) => { let storage = storage.try_clone(layout)?; Ok(Self::Cuda(storage)) } Self::Metal(storage) => { let storage = storage.try_clone(layout)?; Ok(Self::Metal(storage)) } } } pub fn device(&self) -> Device { match self { Self::Cpu(_) => Device::Cpu, Self::Cuda(storage) => Device::Cuda(storage.device().clone()), Self::Metal(storage) => Device::Metal(storage.device().clone()), } } pub fn dtype(&self) -> DType { match self { Self::Cpu(storage) => storage.dtype(), Self::Cuda(storage) => storage.dtype(), Self::Metal(storage) => storage.dtype(), } } pub(crate) fn same_device(&self, rhs: &Self, op: &'static str) -> Result<()> { let lhs_device = self.device(); let rhs_device = rhs.device(); let lhs = lhs_device.location(); let rhs = rhs_device.location(); let same_device = if self.device().is_metal() { // On metal, we require the device to be exactly the same rather than // having the same location. In cuda this is not necessary as all CudaDevice on the // same GPU will use the same cuda stream. lhs_device.same_device(&rhs_device) } else { lhs == rhs }; if !same_device { Err(Error::DeviceMismatchBinaryOp { lhs, rhs, op }.bt()) } else { Ok(()) } } pub(crate) fn same_dtype(&self, rhs: &Self, op: &'static str) -> Result<()> { let lhs = self.dtype(); let rhs = rhs.dtype(); if lhs != rhs { Err(Error::DTypeMismatchBinaryOp { lhs, rhs, op }.bt()) } else { Ok(()) } } pub(crate) fn affine(&self, layout: &Layout, mul: f64, add: f64) -> Result<Self> { match self { Storage::Cpu(storage) => { let storage = storage.affine(layout, mul, add)?; Ok(Self::Cpu(storage)) } Self::Cuda(storage) => { let storage = storage.affine(layout, mul, add)?; Ok(Self::Cuda(storage)) } Self::Metal(storage) => { let storage = storage.affine(layout, mul, add)?; Ok(Self::Metal(storage)) } } } pub(crate) fn powf(&self, layout: &Layout, alpha: f64) -> Result<Self> { match self { Storage::Cpu(storage) => { let storage = storage.powf(layout, alpha)?; Ok(Self::Cpu(storage)) } Self::Cuda(storage) => { let storage = storage.powf(layout, alpha)?; Ok(Self::Cuda(storage)) } Self::Metal(storage) => { let storage = storage.powf(layout, alpha)?; Ok(Self::Metal(storage)) } } } pub(crate) fn elu(&self, layout: &Layout, alpha: f64) -> Result<Self> { match self { Storage::Cpu(storage) => { let storage = storage.elu(layout, alpha)?; Ok(Self::Cpu(storage)) } Self::Cuda(storage) => { let storage = storage.elu(layout, alpha)?; Ok(Self::Cuda(storage)) } Self::Metal(storage) => { let storage = storage.elu(layout, alpha)?; Ok(Self::Metal(storage)) } } } pub(crate) fn cmp( &self, op: CmpOp, rhs: &Self, lhs_layout: &Layout, rhs_layout: &Layout, ) -> Result<Self> { self.same_device(rhs, "cmp")?; self.same_dtype(rhs, "cmp")?; match (self, rhs) { (Storage::Cpu(lhs), Storage::Cpu(rhs)) => { let storage = lhs.cmp(op, rhs, lhs_layout, rhs_layout)?; Ok(Self::Cpu(storage)) } (Self::Cuda(lhs), Self::Cuda(rhs)) => { let storage = lhs.cmp(op, 
rhs, lhs_layout, rhs_layout)?; Ok(Self::Cuda(storage)) } (Self::Metal(lhs), Self::Metal(rhs)) => { let storage = lhs.cmp(op, rhs, lhs_layout, rhs_layout)?; Ok(Self::Metal(storage)) } (lhs, rhs) => { // Should not happen because of the same device check above but we're defensive // anyway. Err(Error::DeviceMismatchBinaryOp { lhs: lhs.device().location(), rhs: rhs.device().location(), op: "cmp", } .bt()) } } } pub(crate) fn reduce_op(&self, op: ReduceOp, layout: &Layout, s: &[usize]) -> Result<Self> { match self { Storage::Cpu(storage) => { let storage = storage.reduce_op(op, layout, s)?; Ok(Self::Cpu(storage)) } Self::Cuda(storage) => { let storage = storage.reduce_op(op, layout, s)?; Ok(Self::Cuda(storage)) } Self::Metal(storage) => { let storage = storage.reduce_op(op, layout, s)?; Ok(Self::Metal(storage)) } } } pub(crate) fn to_dtype(&self, layout: &Layout, dtype: DType) -> Result<Self> { match self { Storage::Cpu(storage) => { let storage = storage.to_dtype(layout, dtype)?; Ok(Self::Cpu(storage)) } Self::Cuda(storage) => { let storage = storage.to_dtype(layout, dtype)?; Ok(Self::Cuda(storage)) } Self::Metal(storage) => { let storage = storage.to_dtype(layout, dtype)?; Ok(Self::Metal(storage)) } } } pub(crate) fn apply_op1(&self, l: &Layout, c: &dyn CustomOp1) -> Result<(Self, Shape)> { match self { Self::Cpu(storage) => { let (storage, shape) = c.cpu_fwd(storage, l)?; Ok((Self::Cpu(storage), shape)) } Self::Cuda(storage) => { let (storage, shape) = c.cuda_fwd(storage, l)?; Ok((Self::Cuda(storage), shape)) } Self::Metal(storage) => { let (storage, shape) = c.metal_fwd(storage, l)?; Ok((Self::Metal(storage), shape)) } } } pub(crate) fn apply_op2( &self, l1: &Layout, t2: &Self, l2: &Layout, c: &dyn CustomOp2, ) -> Result<(Self, Shape)> { self.same_device(t2, c.name())?; match (self, t2) { (Self::Cpu(s1), Self::Cpu(s2)) => { let (s, shape) = c.cpu_fwd(s1, l1, s2, l2)?; Ok((Self::Cpu(s), shape)) } (Self::Cuda(s1), Self::Cuda(s2)) => { let (s, shape) = c.cuda_fwd(s1, l1, s2, l2)?; Ok((Self::Cuda(s), shape)) } (Self::Metal(s1), Self::Metal(s2)) => { let (s, shape) = c.metal_fwd(s1, l1, s2, l2)?; Ok((Self::Metal(s), shape)) } _ => unreachable!(), } } pub(crate) fn apply_op3( &self, l1: &Layout, t2: &Self, l2: &Layout, t3: &Self, l3: &Layout, c: &dyn CustomOp3, ) -> Result<(Self, Shape)> { self.same_device(t2, c.name())?; self.same_device(t3, c.name())?; match (self, t2, t3) { (Self::Cpu(s1), Self::Cpu(s2), Self::Cpu(s3)) => { let (s, shape) = c.cpu_fwd(s1, l1, s2, l2, s3, l3)?; Ok((Self::Cpu(s), shape)) } (Self::Cuda(s1), Self::Cuda(s2), Self::Cuda(s3)) => { let (s, shape) = c.cuda_fwd(s1, l1, s2, l2, s3, l3)?; Ok((Self::Cuda(s), shape)) } (Self::Metal(s1), Self::Metal(s2), Self::Metal(s3)) => { let (s, shape) = c.metal_fwd(s1, l1, s2, l2, s3, l3)?; Ok((Self::Metal(s), shape)) } _ => unreachable!(), } } pub(crate) fn inplace_op1(&mut self, l: &Layout, c: &dyn InplaceOp1) -> Result<()> { match self { Self::Cpu(storage) => c.cpu_fwd(storage, l), Self::Cuda(storage) => c.cuda_fwd(storage, l), Self::Metal(storage) => c.metal_fwd(storage, l), } } pub(crate) fn inplace_op2( &mut self, l1: &Layout, t2: &Self, l2: &Layout, c: &dyn InplaceOp2, ) -> Result<()> { self.same_device(t2, c.name())?; match (self, t2) { (Self::Cpu(s1), Self::Cpu(s2)) => c.cpu_fwd(s1, l1, s2, l2), (Self::Cuda(s1), Self::Cuda(s2)) => c.cuda_fwd(s1, l1, s2, l2), (Self::Metal(s1), Self::Metal(s2)) => c.metal_fwd(s1, l1, s2, l2), _ => unreachable!(), } } pub(crate) fn inplace_op3( &mut self, l1: &Layout, t2: &Self, l2: &Layout, t3: 
&Self, l3: &Layout, c: &dyn InplaceOp3, ) -> Result<()> { self.same_device(t2, c.name())?; self.same_device(t3, c.name())?; match (self, t2, t3) { (Self::Cpu(s1), Self::Cpu(s2), Self::Cpu(s3)) => c.cpu_fwd(s1, l1, s2, l2, s3, l3), (Self::Cuda(s1), Self::Cuda(s2), Self::Cuda(s3)) => c.cuda_fwd(s1, l1, s2, l2, s3, l3), (Self::Metal(s1), Self::Metal(s2), Self::Metal(s3)) => { c.metal_fwd(s1, l1, s2, l2, s3, l3) } _ => unreachable!(), } } pub(crate) fn unary_impl<B: op::UnaryOpT>(&self, layout: &Layout) -> Result<Self> { match self { Storage::Cpu(storage) => { let storage = storage.unary_impl::<B>(layout)?; Ok(Self::Cpu(storage)) } Self::Cuda(storage) => { let storage = storage.unary_impl::<B>(layout)?; Ok(Self::Cuda(storage)) } Self::Metal(storage) => { let storage = storage.unary_impl::<B>(layout)?; Ok(Self::Metal(storage)) } } } pub(crate) fn binary_impl<B: op::BinaryOpT>( &self, rhs: &Self, lhs_layout: &Layout, rhs_layout: &Layout, ) -> Result<Self> { self.same_device(rhs, B::NAME)?; self.same_dtype(rhs, B::NAME)?; match (self, rhs) { (Storage::Cpu(lhs), Storage::Cpu(rhs)) => { let storage = lhs.binary_impl::<B>(rhs, lhs_layout, rhs_layout)?; Ok(Self::Cpu(storage)) } (Self::Cuda(lhs), Self::Cuda(rhs)) => { let storage = lhs.binary_impl::<B>(rhs, lhs_layout, rhs_layout)?; Ok(Self::Cuda(storage)) } (Self::Metal(lhs), Self::Metal(rhs)) => { let storage = lhs.binary_impl::<B>(rhs, lhs_layout, rhs_layout)?; Ok(Self::Metal(storage)) } (lhs, rhs) => { // Should not happen because of the same device check above but we're defensive // anyway. Err(Error::DeviceMismatchBinaryOp { lhs: lhs.device().location(), rhs: rhs.device().location(), op: B::NAME, } .bt()) } } } pub(crate) fn conv1d( &self, l: &Layout, kernel: &Self, kernel_l: &Layout, params: &crate::conv::ParamsConv1D, ) -> Result<Self> { self.same_device(kernel, "conv1d")?; self.same_dtype(kernel, "conv1d")?; match (self, &kernel) { (Storage::Cpu(inp), Storage::Cpu(kernel)) => { let s = inp.conv1d(l, kernel, kernel_l, params)?; Ok(Self::Cpu(s)) } (Storage::Cuda(inp), Storage::Cuda(kernel)) => { let s = inp.conv1d(l, kernel, kernel_l, params)?; Ok(Self::Cuda(s)) } (Storage::Metal(inp), Storage::Metal(kernel)) => { let s = inp.conv1d(l, kernel, kernel_l, params)?; Ok(Self::Metal(s)) } (lhs, rhs) => Err(Error::DeviceMismatchBinaryOp { lhs: lhs.device().location(), rhs: rhs.device().location(), op: "conv1d", } .bt()), } } pub(crate) fn conv_transpose1d( &self, l: &Layout, kernel: &Self, kernel_l: &Layout, params: &crate::conv::ParamsConvTranspose1D, ) -> Result<Self> { self.same_device(kernel, "conv-transpose1d")?; self.same_dtype(kernel, "conv-transpose1d")?; match (self, &kernel) { (Storage::Cpu(inp), Storage::Cpu(kernel)) => { let s = inp.conv_transpose1d(l, kernel, kernel_l, params)?; Ok(Self::Cpu(s)) } (Storage::Cuda(inp), Storage::Cuda(kernel)) => { let s = inp.conv_transpose1d(l, kernel, kernel_l, params)?; Ok(Self::Cuda(s)) } (Storage::Metal(inp), Storage::Metal(kernel)) => { let s = inp.conv_transpose1d(l, kernel, kernel_l, params)?; Ok(Self::Metal(s)) } (lhs, rhs) => Err(Error::DeviceMismatchBinaryOp { lhs: lhs.device().location(), rhs: rhs.device().location(), op: "conv-transpose1d", } .bt()), } } pub(crate) fn conv2d( &self, l: &Layout, kernel: &Self, kernel_l: &Layout, params: &crate::conv::ParamsConv2D, ) -> Result<Self> { self.same_device(kernel, "conv2d")?; self.same_dtype(kernel, "conv2d")?; match (self, &kernel) { (Storage::Cpu(inp), Storage::Cpu(kernel)) => { let s = inp.conv2d(l, kernel, kernel_l, params)?; Ok(Self::Cpu(s)) } 
(Storage::Cuda(inp), Storage::Cuda(kernel)) => { let s = inp.conv2d(l, kernel, kernel_l, params)?; Ok(Self::Cuda(s)) } (Storage::Metal(inp), Storage::Metal(kernel)) => { let s = inp.conv2d(l, kernel, kernel_l, params)?; Ok(Self::Metal(s)) } (lhs, rhs) => Err(Error::DeviceMismatchBinaryOp { lhs: lhs.device().location(), rhs: rhs.device().location(), op: "conv2d", } .bt()), } } pub(crate) fn conv_transpose2d( &self, l: &Layout, kernel: &Self, kernel_l: &Layout, params: &crate::conv::ParamsConvTranspose2D, ) -> Result<Self> { self.same_device(kernel, "conv_transpose2d")?; self.same_dtype(kernel, "conv_transpose2d")?; match (self, &kernel) { (Storage::Cpu(inp), Storage::Cpu(kernel)) => { let s = inp.conv_transpose2d(l, kernel, kernel_l, params)?; Ok(Self::Cpu(s)) } (Storage::Cuda(inp), Storage::Cuda(kernel)) => { let s = inp.conv_transpose2d(l, kernel, kernel_l, params)?; Ok(Self::Cuda(s)) } (Storage::Metal(inp), Storage::Metal(kernel)) => { let s = inp.conv_transpose2d(l, kernel, kernel_l, params)?; Ok(Self::Metal(s)) } (lhs, rhs) => Err(Error::DeviceMismatchBinaryOp { lhs: lhs.device().location(), rhs: rhs.device().location(), op: "conv_transpose2d", } .bt()), } } pub(crate) fn avg_pool2d( &self, layout: &Layout, kernel_size: (usize, usize), stride: (usize, usize), ) -> Result<Self> { match self { Storage::Cpu(storage) => { let storage = storage.avg_pool2d(layout, kernel_size, stride)?; Ok(Self::Cpu(storage)) } Self::Cuda(storage) => { let storage = storage.avg_pool2d(layout, kernel_size, stride)?; Ok(Self::Cuda(storage)) } Self::Metal(storage) => { let storage = storage.avg_pool2d(layout, kernel_size, stride)?; Ok(Self::Metal(storage)) } } } pub(crate) fn max_pool2d( &self, layout: &Layout, kernel_size: (usize, usize), stride: (usize, usize), ) -> Result<Self> { match self { Storage::Cpu(storage) => { let storage = storage.max_pool2d(layout, kernel_size, stride)?; Ok(Self::Cpu(storage)) } Self::Cuda(storage) => { let storage = storage.max_pool2d(layout, kernel_size, stride)?; Ok(Self::Cuda(storage)) } Self::Metal(storage) => { let storage = storage.max_pool2d(layout, kernel_size, stride)?; Ok(Self::Metal(storage)) } } } pub(crate) fn upsample_nearest1d(&self, layout: &Layout, sz: usize) -> Result<Self> { match self { Storage::Cpu(storage) => { let storage = storage.upsample_nearest1d(layout, sz)?; Ok(Self::Cpu(storage)) } Self::Cuda(storage) => { let storage = storage.upsample_nearest1d(layout, sz)?; Ok(Self::Cuda(storage)) } Self::Metal(storage) => { let storage = storage.upsample_nearest1d(layout, sz)?; Ok(Self::Metal(storage)) } } } pub(crate) fn upsample_nearest2d(&self, layout: &Layout, h: usize, w: usize) -> Result<Self> { match self { Storage::Cpu(storage) => { let storage = storage.upsample_nearest2d(layout, h, w)?; Ok(Self::Cpu(storage)) } Self::Cuda(storage) => { let storage = storage.upsample_nearest2d(layout, h, w)?; Ok(Self::Cuda(storage)) } Self::Metal(storage) => { let storage = storage.upsample_nearest2d(layout, h, w)?; Ok(Self::Metal(storage)) } } } pub(crate) fn where_cond( &self, layout: &Layout, t: &Self, layout_t: &Layout, f: &Self, layout_f: &Layout, ) -> Result<Self> { self.same_device(t, "where")?; self.same_device(f, "where")?; t.same_dtype(f, "where")?; match (self, t, f) { (Storage::Cpu(cond), Storage::Cpu(t), Storage::Cpu(f)) => { let storage = cond.where_cond(layout, t, layout_t, f, layout_f)?; Ok(Self::Cpu(storage)) } (Self::Cuda(cond), Self::Cuda(t), Self::Cuda(f)) => { let storage = cond.where_cond(layout, t, layout_t, f, layout_f)?; Ok(Self::Cuda(storage)) } 
(Self::Metal(cond), Self::Metal(t), Self::Metal(f)) => { let storage = cond.where_cond(layout, t, layout_t, f, layout_f)?; Ok(Self::Metal(storage)) } (_, lhs, rhs) => Err(Error::DeviceMismatchBinaryOp { lhs: lhs.device().location(), rhs: rhs.device().location(), op: "where", } .bt()), } } pub(crate) fn gather( &self, l: &Layout, indexes: &Self, indexes_l: &Layout, d: usize, ) -> Result<Self> { self.same_device(indexes, "index-add")?; match (self, indexes) { (Self::Cpu(s), Self::Cpu(indexes)) => { let storage = s.gather(l, indexes, indexes_l, d)?; Ok(Self::Cpu(storage)) } (Self::Cuda(s), Self::Cuda(indexes)) => { let storage = s.gather(l, indexes, indexes_l, d)?; Ok(Self::Cuda(storage)) } (Self::Metal(s), Self::Metal(indexes)) => { let storage = s.gather(l, indexes, indexes_l, d)?; Ok(Self::Metal(storage)) } _ => unreachable!(), } } pub(crate) fn scatter_add( &self, l: &Layout, indexes: &Self, indexes_l: &Layout, source: &Self, source_l: &Layout, d: usize, ) -> Result<Self> { self.same_device(indexes, "scatter-add")?; self.same_device(source, "scatter-add")?; match (self, indexes, source) { (Self::Cpu(s), Self::Cpu(indexes), Self::Cpu(source)) => { let storage = s.scatter_add(l, indexes, indexes_l, source, source_l, d)?; Ok(Self::Cpu(storage)) } (Self::Cuda(s), Self::Cuda(indexes), Self::Cuda(source)) => { let storage = s.scatter_add(l, indexes, indexes_l, source, source_l, d)?; Ok(Self::Cuda(storage)) } (Self::Metal(s), Self::Metal(indexes), Self::Metal(source)) => { let storage = s.scatter_add(l, indexes, indexes_l, source, source_l, d)?; Ok(Self::Metal(storage)) } _ => unreachable!(), } } pub(crate) fn index_add( &self, l: &Layout, indexes: &Self, indexes_l: &Layout, source: &Self, source_l: &Layout, d: usize, ) -> Result<Self> { self.same_device(indexes, "index-add")?; self.same_device(source, "index-add")?; match (self, indexes, source) { (Self::Cpu(s), Self::Cpu(indexes), Self::Cpu(source)) => { let storage = s.index_add(l, indexes, indexes_l, source, source_l, d)?; Ok(Self::Cpu(storage)) } (Self::Cuda(s), Self::Cuda(indexes), Self::Cuda(source)) => { let storage = s.index_add(l, indexes, indexes_l, source, source_l, d)?; Ok(Self::Cuda(storage)) } (Self::Metal(s), Self::Metal(indexes), Self::Metal(source)) => { let storage = s.index_add(l, indexes, indexes_l, source, source_l, d)?; Ok(Self::Metal(storage)) } _ => unreachable!(), } } pub(crate) fn index_select( &self, rhs: &Self, lhs_l: &Layout, rhs_l: &Layout, d: usize, ) -> Result<Self> { self.same_device(rhs, "index-select")?; match (self, rhs) { (Self::Cpu(lhs), Self::Cpu(rhs)) => { let storage = lhs.index_select(rhs, lhs_l, rhs_l, d)?; Ok(Self::Cpu(storage)) } (Self::Cuda(lhs), Self::Cuda(rhs)) => { let storage = lhs.index_select(rhs, lhs_l, rhs_l, d)?; Ok(Self::Cuda(storage)) } (Self::Metal(lhs), Self::Metal(rhs)) => { let storage = lhs.index_select(rhs, lhs_l, rhs_l, d)?; Ok(Self::Metal(storage)) } (lhs, rhs) => Err(Error::DeviceMismatchBinaryOp { lhs: lhs.device().location(), rhs: rhs.device().location(), op: "index-select", } .bt()), } } pub(crate) fn matmul( &self, rhs: &Self, bmnk: (usize, usize, usize, usize), lhs_layout: &Layout, rhs_layout: &Layout, ) -> Result<Self> { self.same_device(rhs, "matmul")?; self.same_dtype(rhs, "matmul")?; match (self, rhs) { (Self::Cpu(lhs), Self::Cpu(rhs)) => { let storage = lhs.matmul(rhs, bmnk, lhs_layout, rhs_layout)?; Ok(Self::Cpu(storage)) } (Self::Cuda(lhs), Self::Cuda(rhs)) => { let storage = lhs.matmul(rhs, bmnk, lhs_layout, rhs_layout)?; Ok(Self::Cuda(storage)) } (Self::Metal(lhs), 
Self::Metal(rhs)) => { let storage = lhs.matmul(rhs, bmnk, lhs_layout, rhs_layout)?; Ok(Self::Metal(storage)) } (lhs, rhs) => Err(Error::DeviceMismatchBinaryOp { lhs: lhs.device().location(), rhs: rhs.device().location(), op: "matmul", } .bt()), } } // self, the source can be strided whereas dst is contiguous. pub(crate) fn copy_strided_src( &self, dst: &mut Self, dst_offset: usize, src_l: &Layout, ) -> Result<()> { match (self, dst) { (Self::Cpu(src), Self::Cpu(dst)) => src.copy_strided_src(dst, dst_offset, src_l), (Self::Cuda(src), Self::Cuda(dst)) => Ok(src.copy_strided_src(dst, dst_offset, src_l)?), (Self::Metal(src), Self::Metal(dst)) => { Ok(src.copy_strided_src(dst, dst_offset, src_l)?) } (lhs, rhs) => Err(Error::DeviceMismatchBinaryOp { lhs: lhs.device().location(), rhs: rhs.device().location(), op: "copy", } .bt()), } } #[allow(clippy::too_many_arguments)] pub(crate) fn copy2d( &self, dst: &mut Self, d1: usize, d2: usize, src_s: usize, dst_s: usize, src_o: usize, dst_o: usize, ) -> Result<()> { match (self, dst) { (Self::Cpu(src), Self::Cpu(dst)) => src.copy2d(dst, d1, d2, src_s, dst_s, src_o, dst_o), (Self::Cuda(src), Self::Cuda(dst)) => { Ok(src.copy2d(dst, d1, d2, src_s, dst_s, src_o, dst_o)?) } (Self::Metal(src), Self::Metal(dst)) => { Ok(src.copy2d(dst, d1, d2, src_s, dst_s, src_o, dst_o)?) } (lhs, rhs) => Err(Error::DeviceMismatchBinaryOp { lhs: lhs.device().location(), rhs: rhs.device().location(), op: "copy2d", } .bt()), } } }
candle/candle-core/src/storage.rs/0
{ "file_path": "candle/candle-core/src/storage.rs", "repo_id": "candle", "token_count": 15585 }
# candle-blip

The [blip-image-captioning](https://huggingface.co/Salesforce/blip-image-captioning-base) model can generate captions for an input image.

## Running an example

```bash
cargo run --example blip --release -- --image candle-examples/examples/yolo-v8/assets/bike.jpg
```

```
Running on CPU, to run on GPU, build this example with `--features cuda`
loaded image Tensor[dims 3, 384, 384; f32]
model built
several cyclists are riding down a road with cars behind them
```

![Leading group, Giro d'Italia 2021](../yolo-v8/assets/bike.jpg)
candle/candle-examples/examples/blip/README.md/0
{ "file_path": "candle/candle-examples/examples/blip/README.md", "repo_id": "candle", "token_count": 190 }
// This example illustrates how to implement custom operations. These operations can provide their // own forward pass (CPU and GPU versions) as well as their backward pass. // // In this example we add the RMS normalization operation and implement it for f32. #[cfg(feature = "mkl")] extern crate intel_mkl_src; #[rustfmt::skip] #[cfg(feature = "cuda")] mod cuda_kernels; use clap::Parser; use candle::{CpuStorage, CustomOp1, Layout, Result, Shape, Tensor}; #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] struct Args { /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, } struct LayerNorm { eps: f32, } impl CustomOp1 for LayerNorm { fn name(&self) -> &'static str { "layer-norm" } fn cpu_fwd(&self, storage: &CpuStorage, layout: &Layout) -> Result<(CpuStorage, Shape)> { let (dim1, dim2) = layout.shape().dims2()?; let slice = storage.as_slice::<f32>()?; let src = match layout.contiguous_offsets() { None => candle::bail!("input has to be contiguous"), Some((o1, o2)) => &slice[o1..o2], }; let mut dst = Vec::with_capacity(dim1 * dim2); for idx1 in 0..dim1 { let src = &src[idx1 * dim2..(idx1 + 1) * dim2]; let variance = src.iter().map(|x| x * x).sum::<f32>(); let s_variance = 1f32 / (variance / dim2 as f32 + self.eps).sqrt(); dst.extend(src.iter().map(|x| x * s_variance)) } let storage = candle::WithDType::to_cpu_storage_owned(dst); Ok((storage, layout.shape().clone())) } #[cfg(feature = "cuda")] fn cuda_fwd( &self, storage: &candle::CudaStorage, layout: &Layout, ) -> Result<(candle::CudaStorage, Shape)> { use candle::backend::BackendStorage; use candle::cuda_backend::cudarc::driver::{LaunchAsync, LaunchConfig}; use candle::cuda_backend::WrapErr; let (d1, d2) = layout.shape().dims2()?; let d1 = d1 as u32; let d2 = d2 as u32; let dev = storage.device().clone(); let slice = storage.as_cuda_slice::<f32>()?; let slice = match layout.contiguous_offsets() { None => candle::bail!("input has to be contiguous"), Some((o1, o2)) => slice.slice(o1..o2), }; let elem_count = layout.shape().elem_count(); let dst = unsafe { dev.alloc::<f32>(elem_count) }.w()?; let func = dev.get_or_load_func("rms_f32", cuda_kernels::LAYERNORM_KERNELS)?; let params = (&dst, &slice, self.eps, d1, d2); let cfg = LaunchConfig { grid_dim: (d1, 1, 1), block_dim: (d2, 1, 1), shared_mem_bytes: 0, }; unsafe { func.launch(cfg, params) }.w()?; let dst = candle::CudaStorage::wrap_cuda_slice(dst, dev); Ok((dst, layout.shape().clone())) } } fn main() -> anyhow::Result<()> { let args = Args::parse(); let device = candle_examples::device(args.cpu)?; let t = Tensor::arange(0f32, 14f32, &device)?.reshape((2, 7))?; println!("{t}"); let t = t.apply_op1(LayerNorm { eps: 1e-5 })?; println!("{t}"); Ok(()) }
candle/candle-examples/examples/custom-ops/main.rs/0
{ "file_path": "candle/candle-examples/examples/custom-ops/main.rs", "repo_id": "candle", "token_count": 1475 }
use anyhow::{Context, Result}; use std::sync::{Arc, Mutex}; pub const SAMPLE_RATE: usize = 24_000; pub(crate) struct AudioOutputData_ { resampled_data: std::collections::VecDeque<f32>, resampler: rubato::FastFixedIn<f32>, output_buffer: Vec<f32>, input_buffer: Vec<f32>, input_len: usize, } impl AudioOutputData_ { pub(crate) fn new(input_sample_rate: usize, output_sample_rate: usize) -> Result<Self> { use rubato::Resampler; let resampled_data = std::collections::VecDeque::with_capacity(output_sample_rate * 10); let resample_ratio = output_sample_rate as f64 / input_sample_rate as f64; let resampler = rubato::FastFixedIn::new( resample_ratio, f64::max(resample_ratio, 1.0), rubato::PolynomialDegree::Septic, 1024, 1, )?; let input_buffer = resampler.input_buffer_allocate(true).remove(0); let output_buffer = resampler.output_buffer_allocate(true).remove(0); Ok(Self { resampled_data, resampler, input_buffer, output_buffer, input_len: 0, }) } pub fn reset(&mut self) { use rubato::Resampler; self.output_buffer.fill(0.); self.input_buffer.fill(0.); self.resampler.reset(); self.resampled_data.clear(); } pub(crate) fn take_all(&mut self) -> Vec<f32> { let mut data = Vec::with_capacity(self.resampled_data.len()); while let Some(elem) = self.resampled_data.pop_back() { data.push(elem); } data } pub(crate) fn is_empty(&self) -> bool { self.resampled_data.is_empty() } // Assumes that the input buffer is large enough. fn push_input_buffer(&mut self, samples: &[f32]) { self.input_buffer[self.input_len..self.input_len + samples.len()].copy_from_slice(samples); self.input_len += samples.len() } pub(crate) fn push_samples(&mut self, samples: &[f32]) -> Result<()> { use rubato::Resampler; let mut pos_in = 0; loop { let rem = self.input_buffer.len() - self.input_len; let pos_end = usize::min(pos_in + rem, samples.len()); self.push_input_buffer(&samples[pos_in..pos_end]); pos_in = pos_end; if self.input_len < self.input_buffer.len() { break; } let (_, out_len) = self.resampler.process_into_buffer( &[&self.input_buffer], &mut [&mut self.output_buffer], None, )?; for &elem in self.output_buffer[..out_len].iter() { self.resampled_data.push_front(elem) } self.input_len = 0; } Ok(()) } } type AudioOutputData = Arc<Mutex<AudioOutputData_>>; pub(crate) fn setup_output_stream() -> Result<(cpal::Stream, AudioOutputData)> { use cpal::traits::{DeviceTrait, HostTrait, StreamTrait}; println!("Setup audio output stream!"); let host = cpal::default_host(); let device = host .default_output_device() .context("no output device available")?; let mut supported_configs_range = device.supported_output_configs()?; let config_range = match supported_configs_range.find(|c| c.channels() == 1) { // On macOS, it's commonly the case that there are only stereo outputs. None => device .supported_output_configs()? 
.next() .context("no audio output available")?, Some(config_range) => config_range, }; let sample_rate = cpal::SampleRate(SAMPLE_RATE as u32).clamp( config_range.min_sample_rate(), config_range.max_sample_rate(), ); let config: cpal::StreamConfig = config_range.with_sample_rate(sample_rate).into(); let channels = config.channels as usize; println!( "cpal device: {} {} {config:?}", device.name().unwrap_or_else(|_| "unk".to_string()), config.sample_rate.0 ); let audio_data = Arc::new(Mutex::new(AudioOutputData_::new( SAMPLE_RATE, config.sample_rate.0 as usize, )?)); let ad = audio_data.clone(); let stream = device.build_output_stream( &config, move |data: &mut [f32], _: &cpal::OutputCallbackInfo| { data.fill(0.); let mut ad = ad.lock().unwrap(); let mut last_elem = 0f32; for (idx, elem) in data.iter_mut().enumerate() { if idx % channels == 0 { match ad.resampled_data.pop_back() { None => break, Some(v) => { last_elem = v; *elem = v } } } else { *elem = last_elem } } }, move |err| eprintln!("cpal error: {err}"), None, // None=blocking, Some(Duration)=timeout )?; stream.play()?; Ok((stream, audio_data)) } pub(crate) fn setup_input_stream() -> Result<(cpal::Stream, AudioOutputData)> { use cpal::traits::{DeviceTrait, HostTrait, StreamTrait}; println!("Setup audio input stream!"); let host = cpal::default_host(); let device = host .default_input_device() .context("no input device available")?; let mut supported_configs_range = device.supported_input_configs()?; let config_range = supported_configs_range .find(|c| c.channels() == 1) .context("no audio input available")?; let sample_rate = cpal::SampleRate(SAMPLE_RATE as u32).clamp( config_range.min_sample_rate(), config_range.max_sample_rate(), ); let config: cpal::StreamConfig = config_range.with_sample_rate(sample_rate).into(); println!( "cpal device: {} {} {config:?}", device.name().unwrap_or_else(|_| "unk".to_string()), config.sample_rate.0 ); let audio_data = Arc::new(Mutex::new(AudioOutputData_::new( config.sample_rate.0 as usize, SAMPLE_RATE, )?)); let ad = audio_data.clone(); let stream = device.build_input_stream( &config, move |data: &[f32], _: &cpal::InputCallbackInfo| { let mut ad = ad.lock().unwrap(); if let Err(err) = ad.push_samples(data) { eprintln!("error processing audio input {err:?}") } }, move |err| eprintln!("cpal error: {err}"), None, // None=blocking, Some(Duration)=timeout )?; stream.play()?; Ok((stream, audio_data)) } fn conv<T>(samples: &mut Vec<f32>, data: std::borrow::Cow<symphonia::core::audio::AudioBuffer<T>>) where T: symphonia::core::sample::Sample, f32: symphonia::core::conv::FromSample<T>, { use symphonia::core::audio::Signal; use symphonia::core::conv::FromSample; samples.extend(data.chan(0).iter().map(|v| f32::from_sample(*v))) } pub(crate) fn pcm_decode<P: AsRef<std::path::Path>>(path: P) -> Result<(Vec<f32>, u32)> { use symphonia::core::audio::{AudioBufferRef, Signal}; let src = std::fs::File::open(path)?; let mss = symphonia::core::io::MediaSourceStream::new(Box::new(src), Default::default()); let hint = symphonia::core::probe::Hint::new(); let meta_opts: symphonia::core::meta::MetadataOptions = Default::default(); let fmt_opts: symphonia::core::formats::FormatOptions = Default::default(); let probed = symphonia::default::get_probe().format(&hint, mss, &fmt_opts, &meta_opts)?; let mut format = probed.format; let track = format .tracks() .iter() .find(|t| t.codec_params.codec != symphonia::core::codecs::CODEC_TYPE_NULL) .expect("no supported audio tracks"); let mut decoder = symphonia::default::get_codecs() 
.make(&track.codec_params, &Default::default()) .expect("unsupported codec"); let track_id = track.id; let sample_rate = track.codec_params.sample_rate.unwrap_or(0); let mut pcm_data = Vec::new(); while let Ok(packet) = format.next_packet() { while !format.metadata().is_latest() { format.metadata().pop(); } if packet.track_id() != track_id { continue; } match decoder.decode(&packet)? { AudioBufferRef::F32(buf) => pcm_data.extend(buf.chan(0)), AudioBufferRef::U8(data) => conv(&mut pcm_data, data), AudioBufferRef::U16(data) => conv(&mut pcm_data, data), AudioBufferRef::U24(data) => conv(&mut pcm_data, data), AudioBufferRef::U32(data) => conv(&mut pcm_data, data), AudioBufferRef::S8(data) => conv(&mut pcm_data, data), AudioBufferRef::S16(data) => conv(&mut pcm_data, data), AudioBufferRef::S24(data) => conv(&mut pcm_data, data), AudioBufferRef::S32(data) => conv(&mut pcm_data, data), AudioBufferRef::F64(data) => conv(&mut pcm_data, data), } } Ok((pcm_data, sample_rate)) } pub(crate) fn resample(pcm_in: &[f32], sr_in: usize, sr_out: usize) -> Result<Vec<f32>> { use rubato::Resampler; let mut pcm_out = Vec::with_capacity((pcm_in.len() as f64 * sr_out as f64 / sr_in as f64) as usize + 1024); let mut resampler = rubato::FftFixedInOut::<f32>::new(sr_in, sr_out, 1024, 1)?; let mut output_buffer = resampler.output_buffer_allocate(true); let mut pos_in = 0; while pos_in + resampler.input_frames_next() < pcm_in.len() { let (in_len, out_len) = resampler.process_into_buffer(&[&pcm_in[pos_in..]], &mut output_buffer, None)?; pos_in += in_len; pcm_out.extend_from_slice(&output_buffer[0][..out_len]); } if pos_in < pcm_in.len() { let (_in_len, out_len) = resampler.process_partial_into_buffer( Some(&[&pcm_in[pos_in..]]), &mut output_buffer, None, )?; pcm_out.extend_from_slice(&output_buffer[0][..out_len]); } Ok(pcm_out) }
candle/candle-examples/examples/encodec/audio_io.rs/0
{ "file_path": "candle/candle-examples/examples/encodec/audio_io.rs", "repo_id": "candle", "token_count": 4796 }
use candle::{DType, Device, Tensor}; use candle_nn::VarBuilder; use candle_transformers::generation::LogitsProcessor; use candle_transformers::models::glm4::*; use clap::Parser; use hf_hub::{Repo, RepoType}; use tokenizers::Tokenizer; struct TextGeneration { model: Model, device: Device, tokenizer: Tokenizer, logits_processor: LogitsProcessor, args: Args, dtype: DType, } impl TextGeneration { #[allow(clippy::too_many_arguments)] fn new(model: Model, tokenizer: Tokenizer, args: Args, device: &Device, dtype: DType) -> Self { let logits_processor = LogitsProcessor::new(args.seed, Some(args.temperature), Some(args.top_p)); Self { model, tokenizer, logits_processor, args, device: device.clone(), dtype, } } fn run(&mut self) -> anyhow::Result<()> { use std::io::Write; let args = &self.args; println!("starting the inference loop"); let tokens = self .tokenizer .encode(args.prompt.to_string(), true) .expect("tokens error"); if tokens.is_empty() { panic!("Empty prompts are not supported in the chatglm model.") } if args.verbose { for (token, id) in tokens.get_tokens().iter().zip(tokens.get_ids().iter()) { let token = token.replace('▁', " ").replace("<0x0A>", "\n"); println!("{id:7} -> '{token}'"); } } else { print!("{}", &args.prompt); std::io::stdout().flush()?; } let eos_token = match self.tokenizer.get_vocab(true).get("<|endoftext|>") { Some(token) => *token, None => panic!("cannot find the endoftext token"), }; let mut tokens = tokens.get_ids().to_vec(); let mut generated_tokens = 0usize; std::io::stdout().flush().expect("output flush error"); let start_gen = std::time::Instant::now(); for index in 0..args.sample_len { let context_size = if index > 0 { 1 } else { tokens.len() }; let ctxt = &tokens[tokens.len().saturating_sub(context_size)..]; let input = Tensor::new(ctxt, &self.device)?.unsqueeze(0)?; let logits = self.model.forward(&input)?; let logits = logits.squeeze(0)?.to_dtype(self.dtype)?; let logits = if args.repeat_penalty == 1. { logits } else { let start_at = tokens.len().saturating_sub(args.repeat_last_n); candle_transformers::utils::apply_repeat_penalty( &logits, args.repeat_penalty, &tokens[start_at..], )? }; let next_token = self.logits_processor.sample(&logits)?; tokens.push(next_token); generated_tokens += 1; if next_token == eos_token { break; } let token = self .tokenizer .decode(&[next_token], true) .expect("token decode error"); if args.verbose { println!( "[Count: {}] [Raw Token: {}] [Decode Token: {}]", generated_tokens, next_token, token ); } else { print!("{token}"); std::io::stdout().flush()?; } } let dt = start_gen.elapsed(); println!( "\n{generated_tokens} tokens generated ({:.2} token/s)", generated_tokens as f64 / dt.as_secs_f64(), ); Ok(()) } } #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] struct Args { #[arg(name = "cache", short)] cache_path: Option<String>, /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, /// Display the token for the specified prompt. #[arg(long)] prompt: String, /// Display the tokens for the specified prompt and outputs. #[arg(long)] verbose: bool, /// The temperature used to generate samples. #[arg(long, default_value_t = 0.8)] temperature: f64, /// Nucleus sampling probability cutoff. #[arg(long, default_value_t = 0.8)] top_p: f64, /// The seed to use when generating random samples. #[arg(long, default_value_t = 299792458)] seed: u64, /// The length of the sample to generate (in tokens). 
#[arg(long, short = 'n', default_value_t = 8192)] sample_len: usize, #[arg(long)] model_id: Option<String>, #[arg(long)] revision: Option<String>, #[arg(long)] weight_path: Option<String>, #[arg(long)] tokenizer: Option<String>, /// Penalty to be applied for repeating tokens, 1. means no penalty. #[arg(long, default_value_t = 1.2)] repeat_penalty: f32, /// The context size to consider for the repeat penalty. #[arg(long, default_value_t = 64)] repeat_last_n: usize, } fn main() -> anyhow::Result<()> { let args = Args::parse(); println!( "avx: {}, neon: {}, simd128: {}, f16c: {}", candle::utils::with_avx(), candle::utils::with_neon(), candle::utils::with_simd128(), candle::utils::with_f16c() ); println!( "temp: {:.2} repeat-penalty: {:.2} repeat-last-n: {}", args.temperature, args.repeat_penalty, args.repeat_last_n ); let start = std::time::Instant::now(); let api = match args.cache_path.as_ref() { None => hf_hub::api::sync::Api::new()?, Some(path) => { hf_hub::api::sync::ApiBuilder::from_cache(hf_hub::Cache::new(path.to_string().into())) .build() .map_err(anyhow::Error::msg)? } }; let model_id = match args.model_id.as_ref() { Some(model_id) => model_id.to_string(), None => "THUDM/glm-4-9b".to_string(), }; let revision = match args.revision.as_ref() { Some(rev) => rev.to_string(), None => "main".to_string(), }; let repo = api.repo(Repo::with_revision(model_id, RepoType::Model, revision)); let tokenizer_filename = match args.tokenizer.as_ref() { Some(file) => std::path::PathBuf::from(file), None => api .model("THUDM/codegeex4-all-9b".to_string()) .get("tokenizer.json") .map_err(anyhow::Error::msg)?, }; let config_filename = match &args.weight_path { Some(path) => std::path::Path::new(path).join("config.json"), _ => repo.get("config.json")?, }; let filenames = match &args.weight_path { Some(path) => { candle_examples::hub_load_local_safetensors(path, "model.safetensors.index.json")? } _ => candle_examples::hub_load_safetensors(&repo, "model.safetensors.index.json")?, }; println!("retrieved the files in {:?}", start.elapsed()); let tokenizer = Tokenizer::from_file(tokenizer_filename).expect("Tokenizer Error"); let start = std::time::Instant::now(); let config: Config = serde_json::from_slice(&std::fs::read(config_filename)?)?; let device = candle_examples::device(args.cpu)?; let dtype = if device.is_cuda() { DType::BF16 } else { DType::F32 }; let vb = unsafe { VarBuilder::from_mmaped_safetensors(&filenames, dtype, &device)? }; let model = Model::new(&config, vb)?; println!("loaded the model in {:?}", start.elapsed()); let mut pipeline = TextGeneration::new(model, tokenizer, args, &device, dtype); pipeline.run()?; Ok(()) }
candle/candle-examples/examples/glm4/main.rs/0
{ "file_path": "candle/candle-examples/examples/glm4/main.rs", "repo_id": "candle", "token_count": 3621 }
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use anyhow::{Error as E, Result}; use clap::Parser; use candle::{DType, Device, Tensor}; use candle_nn::VarBuilder; use candle_transformers::{ generation::LogitsProcessor, models::{moondream, quantized_moondream}, }; use tokenizers::Tokenizer; enum Model { Moondream(moondream::Model), Quantized(quantized_moondream::Model), } struct TextGeneration { model: Model, device: Device, tokenizer: Tokenizer, logits_processor: LogitsProcessor, repeat_penalty: f32, repeat_last_n: usize, verbose_prompt: bool, } impl TextGeneration { #[allow(clippy::too_many_arguments)] fn new( model: Model, tokenizer: Tokenizer, seed: u64, temp: Option<f64>, top_p: Option<f64>, repeat_penalty: f32, repeat_last_n: usize, verbose_prompt: bool, device: &Device, ) -> Self { let logits_processor = LogitsProcessor::new(seed, temp, top_p); Self { model, tokenizer, logits_processor, repeat_penalty, repeat_last_n, verbose_prompt, device: device.clone(), } } fn run(&mut self, prompt: &str, image_embeds: &Tensor, sample_len: usize) -> Result<()> { use std::io::Write; println!("starting the inference loop"); let tokens = self.tokenizer.encode(prompt, true).map_err(E::msg)?; if tokens.is_empty() { anyhow::bail!("Empty prompts are not supported in the Moondream model.") } if self.verbose_prompt { for (token, id) in tokens.get_tokens().iter().zip(tokens.get_ids().iter()) { let token = token.replace('▁', " ").replace("<0x0A>", "\n"); println!("{id:7} -> '{token}'"); } } let mut tokens = tokens.get_ids().to_vec(); let mut generated_tokens = 0usize; // Moondream tokenizer bos_token and eos_token is "<|endoftext|>" // https://huggingface.co/vikhyatk/moondream2/blob/main/special_tokens_map.json let special_token = match self.tokenizer.get_vocab(true).get("<|endoftext|>") { Some(token) => *token, None => anyhow::bail!("cannot find the special token"), }; let (bos_token, eos_token) = (special_token, special_token); let start_gen = std::time::Instant::now(); let mut load_t = std::time::Duration::from_secs_f64(0f64); for index in 0..sample_len { let context_size = if index > 0 { 1 } else { tokens.len() }; let ctxt = &tokens[tokens.len().saturating_sub(context_size)..]; let input = Tensor::new(ctxt, &self.device)?.unsqueeze(0)?; let logits = if index > 0 { match self.model { Model::Moondream(ref mut model) => model.text_model.forward(&input)?, Model::Quantized(ref mut model) => model.text_model.forward(&input)?, } } else { let bos_token = Tensor::new(&[bos_token], &self.device)?.unsqueeze(0)?; let logits = match self.model { Model::Moondream(ref mut model) => { model .text_model .forward_with_img(&bos_token, &input, image_embeds)? } Model::Quantized(ref mut model) => { model .text_model .forward_with_img(&bos_token, &input, image_embeds)? } }; load_t = start_gen.elapsed(); println!("load_t: {:?}", load_t); logits }; let logits = logits.squeeze(0)?.to_dtype(DType::F32)?; let logits = if self.repeat_penalty == 1. { logits } else { let start_at = tokens.len().saturating_sub(self.repeat_last_n); candle_transformers::utils::apply_repeat_penalty( &logits, self.repeat_penalty, &tokens[start_at..], )? 
}; let next_token = self.logits_processor.sample(&logits)?; tokens.push(next_token); generated_tokens += 1; if next_token == eos_token || tokens.ends_with(&[27, 10619, 29] /* <END> */) { break; } let token = self.tokenizer.decode(&[next_token], true).map_err(E::msg)?; print!("{token}"); std::io::stdout().flush()?; } let dt = start_gen.elapsed() - load_t; println!( "\ngenerated in {} seconds\n{generated_tokens} tokens generated ({:.2} token/s)", dt.as_secs_f64(), (generated_tokens - 1) as f64 / dt.as_secs_f64() ); Ok(()) } } #[derive(Parser)] struct Args { /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, /// Enable tracing (generates a trace-timestamp.json file). #[arg(long)] tracing: bool, /// Display the token for the specified prompt. #[arg(long)] verbose_prompt: bool, #[arg(long)] prompt: String, #[arg(long)] image: String, /// The temperature used to generate samples. #[arg(long)] temperature: Option<f64>, /// Nucleus sampling probability cutoff. #[arg(long)] top_p: Option<f64>, /// The seed to use when generating random samples. #[arg(long, default_value_t = 0)] seed: u64, #[arg(long, default_value_t = 5000)] sample_len: usize, /// Penalty to be applied for repeating tokens, 1. means no penalty. #[arg(long, default_value_t = 1.0)] repeat_penalty: f32, /// The context size to consider for the repeat penalty. #[arg(long, default_value_t = 64)] repeat_last_n: usize, #[arg(long)] model_id: Option<String>, #[arg(long)] revision: Option<String>, #[arg(long)] quantized: bool, /// Use f16 precision for all the computations rather than f32. #[arg(long)] f16: bool, #[arg(long)] model_file: Option<String>, #[arg(long)] tokenizer_file: Option<String>, } /// Loads an image from disk using the image crate, this returns a tensor with shape /// (3, 378, 378). pub fn load_image<P: AsRef<std::path::Path>>(p: P) -> candle::Result<Tensor> { let img = image::ImageReader::open(p)? .decode() .map_err(candle::Error::wrap)? .resize_to_fill(378, 378, image::imageops::FilterType::Triangle); // Adjusted to 378x378 let img = img.to_rgb8(); let data = img.into_raw(); let data = Tensor::from_vec(data, (378, 378, 3), &Device::Cpu)?.permute((2, 0, 1))?; let mean = Tensor::new(&[0.5f32, 0.5, 0.5], &Device::Cpu)?.reshape((3, 1, 1))?; let std = Tensor::new(&[0.5f32, 0.5, 0.5], &Device::Cpu)?.reshape((3, 1, 1))?; (data.to_dtype(candle::DType::F32)? / 255.)? .broadcast_sub(&mean)? 
.broadcast_div(&std) } #[tokio::main] async fn main() -> anyhow::Result<()> { use tracing_chrome::ChromeLayerBuilder; use tracing_subscriber::prelude::*; let args = Args::parse(); let _guard = if args.tracing { let (chrome_layer, guard) = ChromeLayerBuilder::new().build(); tracing_subscriber::registry().with(chrome_layer).init(); Some(guard) } else { None }; println!( "avx: {}, neon: {}, simd128: {}, f16c: {}", candle::utils::with_avx(), candle::utils::with_neon(), candle::utils::with_simd128(), candle::utils::with_f16c() ); println!( "temp: {:.2} repeat-penalty: {:.2} repeat-last-n: {}", args.temperature.unwrap_or(0.), args.repeat_penalty, args.repeat_last_n ); let start = std::time::Instant::now(); let api = hf_hub::api::tokio::Api::new()?; let (model_id, revision) = match args.model_id { Some(model_id) => (model_id.to_string(), None), None => { if args.quantized { ("santiagomed/candle-moondream".to_string(), None) } else { ( "vikhyatk/moondream1".to_string(), Some("f6e9da68e8f1b78b8f3ee10905d56826db7a5802"), ) } } }; let revision = match (args.revision, revision) { (Some(r), _) => r, (None, Some(r)) => r.to_string(), (None, None) => "main".to_string(), }; let repo = api.repo(hf_hub::Repo::with_revision( model_id, hf_hub::RepoType::Model, revision, )); let model_file = match args.model_file { Some(m) => m.into(), None => { if args.quantized { repo.get("model-q4_0.gguf").await? } else { repo.get("model.safetensors").await? } } }; let tokenizer = match args.tokenizer_file { Some(m) => m.into(), None => repo.get("tokenizer.json").await?, }; println!("retrieved the files in {:?}", start.elapsed()); let tokenizer = Tokenizer::from_file(tokenizer).map_err(E::msg)?; let start = std::time::Instant::now(); let device = candle_examples::device(args.cpu)?; let config = moondream::Config::v2(); let dtype = if args.quantized { if args.f16 { anyhow::bail!("Quantized model does not support f16"); } DType::F32 } else if device.is_cuda() || args.f16 { DType::F16 } else { DType::F32 }; let model = if args.quantized { let vb = candle_transformers::quantized_var_builder::VarBuilder::from_gguf( &model_file, &device, )?; let model = quantized_moondream::Model::new(&config, vb)?; Model::Quantized(model) } else { let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[model_file], dtype, &device)? }; let model = moondream::Model::new(&config, vb)?; Model::Moondream(model) }; println!("loaded the model in {:?}", start.elapsed()); let start = std::time::Instant::now(); let image = load_image(args.image)? .to_device(&device)? .to_dtype(dtype)?; let image_embeds = image.unsqueeze(0)?; let image_embeds = match model { Model::Moondream(ref m) => image_embeds.apply(m.vision_encoder())?, Model::Quantized(ref m) => image_embeds.apply(m.vision_encoder())?, }; println!( "loaded and encoded the image {image:?} in {:?}", start.elapsed() ); let prompt = format!("\n\nQuestion: {0}\n\nAnswer:", args.prompt); let mut pipeline = TextGeneration::new( model, tokenizer, args.seed, args.temperature, args.top_p, args.repeat_penalty, args.repeat_last_n, args.verbose_prompt, &device, ); pipeline.run(&prompt, &image_embeds, args.sample_len)?; Ok(()) }
candle/candle-examples/examples/moondream/main.rs/0
{ "file_path": "candle/candle-examples/examples/moondream/main.rs", "repo_id": "candle", "token_count": 5490 }
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use anyhow::{Error as E, Result}; use clap::{Parser, ValueEnum}; use candle_examples::token_output_stream::TokenOutputStream; use candle_transformers::models::mixformer::{Config, MixFormerSequentialForCausalLM as MixFormer}; use candle_transformers::models::phi::{Config as PhiConfig, Model as Phi}; use candle_transformers::models::phi3::{Config as Phi3Config, Model as Phi3}; use candle_transformers::models::quantized_mixformer::MixFormerSequentialForCausalLM as QMixFormer; use candle::{DType, Device, IndexOp, Tensor}; use candle_nn::VarBuilder; use candle_transformers::generation::LogitsProcessor; use hf_hub::{api::sync::Api, Repo, RepoType}; use tokenizers::Tokenizer; enum Model { MixFormer(MixFormer), Phi(Phi), Phi3(Phi3), Quantized(QMixFormer), } struct TextGeneration { model: Model, device: Device, tokenizer: TokenOutputStream, logits_processor: LogitsProcessor, repeat_penalty: f32, repeat_last_n: usize, verbose_prompt: bool, } impl TextGeneration { #[allow(clippy::too_many_arguments)] fn new( model: Model, tokenizer: Tokenizer, seed: u64, temp: Option<f64>, top_p: Option<f64>, repeat_penalty: f32, repeat_last_n: usize, verbose_prompt: bool, device: &Device, ) -> Self { let logits_processor = LogitsProcessor::new(seed, temp, top_p); Self { model, tokenizer: TokenOutputStream::new(tokenizer), logits_processor, repeat_penalty, repeat_last_n, verbose_prompt, device: device.clone(), } } fn run(&mut self, prompt: &str, sample_len: usize) -> Result<()> { use std::io::Write; println!("starting the inference loop"); let tokens = self .tokenizer .tokenizer() .encode(prompt, true) .map_err(E::msg)?; if tokens.is_empty() { anyhow::bail!("Empty prompts are not supported in the phi model.") } if self.verbose_prompt { for (token, id) in tokens.get_tokens().iter().zip(tokens.get_ids().iter()) { let token = token.replace('▁', " ").replace("<0x0A>", "\n"); println!("{id:7} -> '{token}'"); } } let mut tokens = tokens.get_ids().to_vec(); let mut generated_tokens = 0usize; let eos_token = match self.tokenizer.get_token("<|endoftext|>") { Some(token) => token, None => anyhow::bail!("cannot find the endoftext token"), }; print!("{prompt}"); std::io::stdout().flush()?; let start_gen = std::time::Instant::now(); let mut pos = 0; for index in 0..sample_len { let context_size = if index > 0 { 1 } else { tokens.len() }; let ctxt = &tokens[tokens.len().saturating_sub(context_size)..]; let input = Tensor::new(ctxt, &self.device)?.unsqueeze(0)?; let logits = match &mut self.model { Model::MixFormer(m) => m.forward(&input)?, Model::Phi(m) => m.forward(&input)?, Model::Quantized(m) => m.forward(&input)?, Model::Phi3(m) => m.forward(&input, pos)?.i((.., 0, ..))?, }; let logits = logits.squeeze(0)?.to_dtype(DType::F32)?; let logits = if self.repeat_penalty == 1. { logits } else { let start_at = tokens.len().saturating_sub(self.repeat_last_n); candle_transformers::utils::apply_repeat_penalty( &logits, self.repeat_penalty, &tokens[start_at..], )? }; let next_token = self.logits_processor.sample(&logits)?; tokens.push(next_token); generated_tokens += 1; if next_token == eos_token { if let Some(t) = self.tokenizer.decode_rest()? { print!("{t}"); std::io::stdout().flush()?; } break; } if let Some(t) = self.tokenizer.next_token(next_token)? 
{ print!("{t}"); std::io::stdout().flush()?; } pos += context_size; } let dt = start_gen.elapsed(); println!( "\n{generated_tokens} tokens generated ({:.2} token/s)", generated_tokens as f64 / dt.as_secs_f64(), ); Ok(()) } } #[derive(Clone, Copy, Debug, ValueEnum, PartialEq, Eq)] enum WhichModel { #[value(name = "1")] V1, #[value(name = "1.5")] V1_5, #[value(name = "2")] V2, #[value(name = "3")] V3, #[value(name = "3-medium")] V3Medium, #[value(name = "2-old")] V2Old, PuffinPhiV2, PhiHermes, } #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] struct Args { /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, /// Enable tracing (generates a trace-timestamp.json file). #[arg(long)] tracing: bool, /// Display the token for the specified prompt. #[arg(long)] verbose_prompt: bool, #[arg(long)] prompt: Option<String>, #[arg(long)] mmlu_dir: Option<String>, /// The temperature used to generate samples. #[arg(long)] temperature: Option<f64>, /// Nucleus sampling probability cutoff. #[arg(long)] top_p: Option<f64>, /// The seed to use when generating random samples. #[arg(long, default_value_t = 299792458)] seed: u64, /// The length of the sample to generate (in tokens). #[arg(long, short = 'n', default_value_t = 5000)] sample_len: usize, #[arg(long)] model_id: Option<String>, #[arg(long, default_value = "2")] model: WhichModel, #[arg(long)] revision: Option<String>, #[arg(long)] weight_file: Option<String>, #[arg(long)] tokenizer: Option<String>, #[arg(long)] quantized: bool, /// Penalty to be applied for repeating tokens, 1. means no penalty. #[arg(long, default_value_t = 1.1)] repeat_penalty: f32, /// The context size to consider for the repeat penalty. #[arg(long, default_value_t = 64)] repeat_last_n: usize, /// The dtype to be used for running the model, e.g. f32, bf16, or f16. 
#[arg(long)] dtype: Option<String>, } fn main() -> Result<()> { use tracing_chrome::ChromeLayerBuilder; use tracing_subscriber::prelude::*; let args = Args::parse(); let _guard = if args.tracing { let (chrome_layer, guard) = ChromeLayerBuilder::new().build(); tracing_subscriber::registry().with(chrome_layer).init(); Some(guard) } else { None }; println!( "avx: {}, neon: {}, simd128: {}, f16c: {}", candle::utils::with_avx(), candle::utils::with_neon(), candle::utils::with_simd128(), candle::utils::with_f16c() ); println!( "temp: {:.2} repeat-penalty: {:.2} repeat-last-n: {}", args.temperature.unwrap_or(0.), args.repeat_penalty, args.repeat_last_n ); let start = std::time::Instant::now(); let api = Api::new()?; let model_id = match args.model_id { Some(model_id) => model_id.to_string(), None => { if args.quantized { "lmz/candle-quantized-phi".to_string() } else { match args.model { WhichModel::V1 => "microsoft/phi-1".to_string(), WhichModel::V1_5 => "microsoft/phi-1_5".to_string(), WhichModel::V2 | WhichModel::V2Old => "microsoft/phi-2".to_string(), WhichModel::V3 => "microsoft/Phi-3-mini-4k-instruct".to_string(), WhichModel::V3Medium => "microsoft/Phi-3-medium-4k-instruct".to_string(), WhichModel::PuffinPhiV2 | WhichModel::PhiHermes => { "lmz/candle-quantized-phi".to_string() } } } } }; let revision = match args.revision { Some(rev) => rev.to_string(), None => { if args.quantized { "main".to_string() } else { match args.model { WhichModel::V1 => "refs/pr/8".to_string(), WhichModel::V1_5 => "refs/pr/73".to_string(), WhichModel::V2Old => "834565c23f9b28b96ccbeabe614dd906b6db551a".to_string(), WhichModel::V2 | WhichModel::V3 | WhichModel::V3Medium | WhichModel::PuffinPhiV2 | WhichModel::PhiHermes => "main".to_string(), } } } }; let repo = api.repo(Repo::with_revision(model_id, RepoType::Model, revision)); let tokenizer_filename = match args.tokenizer { Some(file) => std::path::PathBuf::from(file), None => match args.model { WhichModel::V1 | WhichModel::V1_5 | WhichModel::V2 | WhichModel::V2Old | WhichModel::V3 | WhichModel::V3Medium => repo.get("tokenizer.json")?, WhichModel::PuffinPhiV2 | WhichModel::PhiHermes => { repo.get("tokenizer-puffin-phi-v2.json")? } }, }; let filenames = match args.weight_file { Some(weight_file) => vec![std::path::PathBuf::from(weight_file)], None => { if args.quantized { match args.model { WhichModel::V1 => vec![repo.get("model-v1-q4k.gguf")?], WhichModel::V1_5 => vec![repo.get("model-q4k.gguf")?], WhichModel::V2 | WhichModel::V2Old => vec![repo.get("model-v2-q4k.gguf")?], WhichModel::PuffinPhiV2 => vec![repo.get("model-puffin-phi-v2-q4k.gguf")?], WhichModel::PhiHermes => vec![repo.get("model-phi-hermes-1_3B-q4k.gguf")?], WhichModel::V3 | WhichModel::V3Medium => anyhow::bail!( "use the quantized or quantized-phi examples for quantized phi-v3" ), } } else { match args.model { WhichModel::V1 | WhichModel::V1_5 => vec![repo.get("model.safetensors")?], WhichModel::V2 | WhichModel::V2Old | WhichModel::V3 | WhichModel::V3Medium => { candle_examples::hub_load_safetensors( &repo, "model.safetensors.index.json", )? 
} WhichModel::PuffinPhiV2 => vec![repo.get("model-puffin-phi-v2.safetensors")?], WhichModel::PhiHermes => vec![repo.get("model-phi-hermes-1_3B.safetensors")?], } } } }; println!("retrieved the files in {:?}", start.elapsed()); let tokenizer = Tokenizer::from_file(tokenizer_filename).map_err(E::msg)?; let start = std::time::Instant::now(); let config = || match args.model { WhichModel::V1 => Config::v1(), WhichModel::V1_5 => Config::v1_5(), WhichModel::V2 | WhichModel::V2Old => Config::v2(), WhichModel::PuffinPhiV2 => Config::puffin_phi_v2(), WhichModel::PhiHermes => Config::phi_hermes_1_3b(), WhichModel::V3 | WhichModel::V3Medium => { panic!("use the quantized or quantized-phi examples for quantized phi-v3") } }; let device = candle_examples::device(args.cpu)?; let model = if args.quantized { let config = config(); let vb = candle_transformers::quantized_var_builder::VarBuilder::from_gguf( &filenames[0], &device, )?; let model = match args.model { WhichModel::V2 | WhichModel::V2Old => QMixFormer::new_v2(&config, vb)?, _ => QMixFormer::new(&config, vb)?, }; Model::Quantized(model) } else { let dtype = match args.dtype { Some(dtype) => std::str::FromStr::from_str(&dtype)?, None => { if args.model == WhichModel::V3 || args.model == WhichModel::V3Medium { device.bf16_default_to_f32() } else { DType::F32 } } }; let vb = unsafe { VarBuilder::from_mmaped_safetensors(&filenames, dtype, &device)? }; match args.model { WhichModel::V1 | WhichModel::V1_5 | WhichModel::V2 => { let config_filename = repo.get("config.json")?; let config = std::fs::read_to_string(config_filename)?; let config: PhiConfig = serde_json::from_str(&config)?; let phi = Phi::new(&config, vb)?; Model::Phi(phi) } WhichModel::V3 | WhichModel::V3Medium => { let config_filename = repo.get("config.json")?; let config = std::fs::read_to_string(config_filename)?; let config: Phi3Config = serde_json::from_str(&config)?; let phi3 = Phi3::new(&config, vb)?; Model::Phi3(phi3) } WhichModel::V2Old => { let config = config(); Model::MixFormer(MixFormer::new_v2(&config, vb)?) } WhichModel::PhiHermes | WhichModel::PuffinPhiV2 => { let config = config(); Model::MixFormer(MixFormer::new(&config, vb)?) 
} } }; println!("loaded the model in {:?}", start.elapsed()); match (args.prompt, args.mmlu_dir) { (None, None) | (Some(_), Some(_)) => { anyhow::bail!("exactly one of --prompt and --mmlu-dir must be specified") } (Some(prompt), None) => { let mut pipeline = TextGeneration::new( model, tokenizer, args.seed, args.temperature, args.top_p, args.repeat_penalty, args.repeat_last_n, args.verbose_prompt, &device, ); pipeline.run(&prompt, args.sample_len)?; } (None, Some(mmlu_dir)) => mmlu(model, tokenizer, &device, mmlu_dir)?, } Ok(()) } fn mmlu<P: AsRef<std::path::Path>>( mut model: Model, tokenizer: Tokenizer, device: &Device, mmlu_dir: P, ) -> anyhow::Result<()> { for dir_entry in mmlu_dir.as_ref().read_dir()?.flatten() { let dir_entry = dir_entry.path(); let theme = match dir_entry.file_stem().and_then(|v| v.to_str()) { None => "".to_string(), Some(v) => match v.strip_suffix("_test") { None => v.replace('_', " "), Some(v) => v.replace('_', " "), }, }; if dir_entry.extension().as_ref().and_then(|v| v.to_str()) != Some("csv") { continue; } println!("reading {dir_entry:?}"); let dir_entry = std::fs::File::open(dir_entry)?; let mut reader = csv::ReaderBuilder::new() .has_headers(false) .from_reader(dir_entry); let token_a = tokenizer.token_to_id("A").unwrap(); let token_b = tokenizer.token_to_id("B").unwrap(); let token_c = tokenizer.token_to_id("C").unwrap(); let token_d = tokenizer.token_to_id("D").unwrap(); for row in reader.records() { let row = match row { Err(_) => continue, Ok(row) => row, }; if row.len() < 5 { continue; } let question = row.get(0).unwrap(); let answer_a = row.get(1).unwrap(); let answer_b = row.get(2).unwrap(); let answer_c = row.get(3).unwrap(); let answer_d = row.get(4).unwrap(); let answer = row.get(5).unwrap(); let prompt = format!( "{} {theme}.\n{question}\nA. {answer_a}\nB. {answer_b}\nC. {answer_c}\nD. {answer_d}\nAnswer:\n", "The following are multiple choice questions (with answers) about" ); let tokens = tokenizer.encode(prompt.as_str(), true).map_err(E::msg)?; let tokens = tokens.get_ids().to_vec(); let input = Tensor::new(tokens, device)?.unsqueeze(0)?; let logits = match &mut model { Model::MixFormer(m) => { m.clear_kv_cache(); m.forward(&input)? } Model::Phi(m) => { m.clear_kv_cache(); m.forward(&input)? } Model::Phi3(m) => { m.clear_kv_cache(); m.forward(&input, 0)? } Model::Quantized(m) => { m.clear_kv_cache(); m.forward(&input)? } }; let logits = logits.squeeze(0)?.to_dtype(DType::F32)?; let logits_v: Vec<f32> = logits.to_vec1()?; let pr_a = logits_v[token_a as usize]; let pr_b = logits_v[token_b as usize]; let pr_c = logits_v[token_c as usize]; let pr_d = logits_v[token_d as usize]; let model_answer = if pr_a > pr_b && pr_a > pr_c && pr_a > pr_d { "A" } else if pr_b > pr_c && pr_b > pr_d { "B" } else if pr_c > pr_d { "C" } else { "D" }; println!("{prompt}\n -> {model_answer} vs {answer}"); } } Ok(()) }
candle/candle-examples/examples/phi/main.rs/0
{ "file_path": "candle/candle-examples/examples/phi/main.rs", "repo_id": "candle", "token_count": 9478 }
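The `mmlu` helper in the phi example above picks the model's answer by comparing the logits of the four letter tokens and keeping the largest, with ties resolved in A-to-D order. A minimal, dependency-free sketch of that selection rule (the logit values in `main` are made up for illustration):

```rust
// Mirror of the answer-selection chain used in the `mmlu` helper above:
// the candidate with the highest logit wins, ties resolved in A..D order.
fn pick_answer(pr_a: f32, pr_b: f32, pr_c: f32, pr_d: f32) -> &'static str {
    if pr_a > pr_b && pr_a > pr_c && pr_a > pr_d {
        "A"
    } else if pr_b > pr_c && pr_b > pr_d {
        "B"
    } else if pr_c > pr_d {
        "C"
    } else {
        "D"
    }
}

fn main() {
    // Hypothetical logits for the A/B/C/D tokens.
    let (a, b, c, d) = (1.2f32, 3.4, 0.5, 2.0);
    assert_eq!(pick_answer(a, b, c, d), "B");
    println!("model answer: {}", pick_answer(a, b, c, d));
}
```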
import gymnasium as gym import numpy as np from collections import deque from PIL import Image from multiprocessing import Process, Pipe # atari_wrappers.py class NoopResetEnv(gym.Wrapper): def __init__(self, env, noop_max=30): """Sample initial states by taking random number of no-ops on reset. No-op is assumed to be action 0. """ gym.Wrapper.__init__(self, env) self.noop_max = noop_max self.override_num_noops = None assert env.unwrapped.get_action_meanings()[0] == 'NOOP' def reset(self): """ Do no-op action for a number of steps in [1, noop_max].""" self.env.reset() if self.override_num_noops is not None: noops = self.override_num_noops else: noops = self.unwrapped.np_random.integers(1, self.noop_max + 1) #pylint: disable=E1101 assert noops > 0 obs = None for _ in range(noops): obs, _, done, _ = self.env.step(0) if done: obs = self.env.reset() return obs class FireResetEnv(gym.Wrapper): def __init__(self, env): """Take action on reset for environments that are fixed until firing.""" gym.Wrapper.__init__(self, env) assert env.unwrapped.get_action_meanings()[1] == 'FIRE' assert len(env.unwrapped.get_action_meanings()) >= 3 def reset(self): self.env.reset() obs, _, done, _ = self.env.step(1) if done: self.env.reset() obs, _, done, _ = self.env.step(2) if done: self.env.reset() return obs class ImageSaver(gym.Wrapper): def __init__(self, env, img_path, rank): gym.Wrapper.__init__(self, env) self._cnt = 0 self._img_path = img_path self._rank = rank def step(self, action): step_result = self.env.step(action) obs, _, _, _ = step_result img = Image.fromarray(obs, 'RGB') img.save('%s/out%d-%05d.png' % (self._img_path, self._rank, self._cnt)) self._cnt += 1 return step_result class EpisodicLifeEnv(gym.Wrapper): def __init__(self, env): """Make end-of-life == end-of-episode, but only reset on true game over. Done by DeepMind for the DQN and co. since it helps value estimation. """ gym.Wrapper.__init__(self, env) self.lives = 0 self.was_real_done = True def step(self, action): obs, reward, done, info = self.env.step(action) self.was_real_done = done # check current lives, make loss of life terminal, # then update lives to handle bonus lives lives = self.env.unwrapped.ale.lives() if lives < self.lives and lives > 0: # for Qbert sometimes we stay in lives == 0 condition for a few frames # so its important to keep lives > 0, so that we only reset once # the environment advertises done. done = True self.lives = lives return obs, reward, done, info def reset(self): """Reset only when lives are exhausted. This way all states are still reachable even though lives are episodic, and the learner need not know about any of this behind-the-scenes. """ if self.was_real_done: obs = self.env.reset() else: # no-op step to advance from terminal/lost life state obs, _, _, _ = self.env.step(0) self.lives = self.env.unwrapped.ale.lives() return obs class MaxAndSkipEnv(gym.Wrapper): def __init__(self, env, skip=4): """Return only every `skip`-th frame""" gym.Wrapper.__init__(self, env) # most recent raw observations (for max pooling across time steps) self._obs_buffer = deque(maxlen=2) self._skip = skip def step(self, action): """Repeat action, sum reward, and max over last observations.""" total_reward = 0.0 done = None for _ in range(self._skip): obs, reward, done, info = self.env.step(action) self._obs_buffer.append(obs) total_reward += reward if done: break max_frame = np.max(np.stack(self._obs_buffer), axis=0) return max_frame, total_reward, done, info def reset(self): """Clear past frame buffer and init. 
to first obs. from inner env.""" self._obs_buffer.clear() obs = self.env.reset() self._obs_buffer.append(obs) return obs class ClipRewardEnv(gym.RewardWrapper): def reward(self, reward): """Bin reward to {+1, 0, -1} by its sign.""" return np.sign(reward) class WarpFrame(gym.ObservationWrapper): def __init__(self, env): """Warp frames to 84x84 as done in the Nature paper and later work.""" gym.ObservationWrapper.__init__(self, env) self.res = 84 self.observation_space = gym.spaces.Box(low=0, high=255, shape=(self.res, self.res, 1), dtype='uint8') def observation(self, obs): frame = np.dot(obs.astype('float32'), np.array([0.299, 0.587, 0.114], 'float32')) frame = np.array(Image.fromarray(frame).resize((self.res, self.res), resample=Image.BILINEAR), dtype=np.uint8) return frame.reshape((self.res, self.res, 1)) class FrameStack(gym.Wrapper): def __init__(self, env, k): """Buffer observations and stack across channels (last axis).""" gym.Wrapper.__init__(self, env) self.k = k self.frames = deque([], maxlen=k) shp = env.observation_space.shape assert shp[2] == 1 # can only stack 1-channel frames self.observation_space = gym.spaces.Box(low=0, high=255, shape=(shp[0], shp[1], k), dtype='uint8') def reset(self): """Clear buffer and re-fill by duplicating the first observation.""" ob = self.env.reset() for _ in range(self.k): self.frames.append(ob) return self.observation() def step(self, action): ob, reward, done, info = self.env.step(action) self.frames.append(ob) return self.observation(), reward, done, info def observation(self): assert len(self.frames) == self.k return np.concatenate(self.frames, axis=2) def wrap_deepmind(env, episode_life=True, clip_rewards=True): """Configure environment for DeepMind-style Atari. Note: this does not include frame stacking!""" assert 'NoFrameskip' in env.spec.id # required for DeepMind-style skip if episode_life: env = EpisodicLifeEnv(env) env = NoopResetEnv(env, noop_max=30) env = MaxAndSkipEnv(env, skip=4) if 'FIRE' in env.unwrapped.get_action_meanings(): env = FireResetEnv(env) env = WarpFrame(env) if clip_rewards: env = ClipRewardEnv(env) return env # envs.py def make_env(env_id, img_dir, seed, rank): def _thunk(): env = gym.make(env_id) env.reset(seed=(seed + rank)) if img_dir is not None: env = ImageSaver(env, img_dir, rank) env = wrap_deepmind(env) env = WrapPyTorch(env) return env return _thunk class WrapPyTorch(gym.ObservationWrapper): def __init__(self, env=None): super(WrapPyTorch, self).__init__(env) self.observation_space = gym.spaces.Box(0.0, 1.0, [1, 84, 84], dtype='float32') def observation(self, observation): return observation.transpose(2, 0, 1) # vecenv.py class VecEnv(object): """ Vectorized environment base class """ def step(self, vac): """ Apply sequence of actions to sequence of environments actions -> (observations, rewards, news) where 'news' is a boolean vector indicating whether each element is new. 
""" raise NotImplementedError def reset(self): """ Reset all environments """ raise NotImplementedError def close(self): pass # subproc_vec_env.py def worker(remote, env_fn_wrapper): env = env_fn_wrapper.x() while True: cmd, data = remote.recv() if cmd == 'step': ob, reward, done, info = env.step(data) if done: ob = env.reset() remote.send((ob, reward, done, info)) elif cmd == 'reset': ob = env.reset() remote.send(ob) elif cmd == 'close': remote.close() break elif cmd == 'get_spaces': remote.send((env.action_space, env.observation_space)) else: raise NotImplementedError class CloudpickleWrapper(object): """ Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle) """ def __init__(self, x): self.x = x def __getstate__(self): import cloudpickle return cloudpickle.dumps(self.x) def __setstate__(self, ob): import pickle self.x = pickle.loads(ob) class SubprocVecEnv(VecEnv): def __init__(self, env_fns): """ envs: list of gym environments to run in subprocesses """ nenvs = len(env_fns) self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)]) self.ps = [Process(target=worker, args=(work_remote, CloudpickleWrapper(env_fn))) for (work_remote, env_fn) in zip(self.work_remotes, env_fns)] for p in self.ps: p.start() self.remotes[0].send(('get_spaces', None)) self.action_space, self.observation_space = self.remotes[0].recv() def step(self, actions): for remote, action in zip(self.remotes, actions): remote.send(('step', action)) results = [remote.recv() for remote in self.remotes] obs, rews, dones, infos = zip(*results) return np.stack(obs), np.stack(rews), np.stack(dones), infos def reset(self): for remote in self.remotes: remote.send(('reset', None)) return np.stack([remote.recv() for remote in self.remotes]) def close(self): for remote in self.remotes: remote.send(('close', None)) for p in self.ps: p.join() @property def num_envs(self): return len(self.remotes) # Create the environment. def make(env_name, img_dir, num_processes): envs = SubprocVecEnv([ make_env(env_name, img_dir, 1337, i) for i in range(num_processes) ]) return envs
candle/candle-examples/examples/reinforcement-learning/atari_wrappers.py/0
{ "file_path": "candle/candle-examples/examples/reinforcement-learning/atari_wrappers.py", "repo_id": "candle", "token_count": 4740 }
# candle-segformer

- [HuggingFace Segformer Model Card][segformer]
- [`mit-b0` - An encoder-only pretrained model][encoder]
- [`segformer-b0-finetuned-ade-512-512` - A model fine-tuned for segmentation][ade512]

## How to run the example

If you want, you can use the example images from this [pull request][pr]: download them and supply the path to an image as an argument to the example.

```bash
# run the image classification task
cargo run --example segformer classify <path-to-image>
# run the segmentation task
cargo run --example segformer segment <path-to-image>
```

Example output for classification:

```text
classification logits [3.275261e-5, 0.0008562019, 0.0008868563, 0.9977506, 0.0002465068, 0.0002241473, 2.846596e-6]
label: hamburger
```

[pr]: https://github.com/huggingface/candle/pull/1617
[segformer]: https://huggingface.co/docs/transformers/model_doc/segformer
[encoder]: https://huggingface.co/nvidia/mit-b0
[ade512]: https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512
candle/candle-examples/examples/segformer/README.md/0
{ "file_path": "candle/candle-examples/examples/segformer/README.md", "repo_id": "candle", "token_count": 357 }
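The classification output shown in the segformer README is just the argmax over the per-class scores printed on the first line. A plain-Rust sketch of that final step; the label list below is hypothetical and only stands in for whatever label mapping the model ships with:

```rust
// Return the index of the largest score; `total_cmp` gives a total order over f32.
fn argmax(scores: &[f32]) -> usize {
    scores
        .iter()
        .enumerate()
        .max_by(|a, b| a.1.total_cmp(b.1))
        .map(|(i, _)| i)
        .unwrap_or(0)
}

fn main() {
    // The scores printed in the README above.
    let logits = [
        3.275261e-5, 0.0008562019, 0.0008868563, 0.9977506, 0.0002465068, 0.0002241473, 2.846596e-6,
    ];
    // Hypothetical label names; index 3 wins here.
    let labels = ["class-0", "class-1", "class-2", "hamburger", "class-4", "class-5", "class-6"];
    println!("label: {}", labels[argmax(&logits)]);
}
```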
use anyhow::{Error as E, Ok, Result}; use candle::{DType, IndexOp, Module, Tensor, D}; use candle_transformers::models::{stable_diffusion, t5}; use std::path::PathBuf; use tokenizers::tokenizer::Tokenizer; struct ClipWithTokenizer { clip: stable_diffusion::clip::ClipTextTransformer, config: stable_diffusion::clip::Config, tokenizer: Tokenizer, max_position_embeddings: usize, } impl ClipWithTokenizer { fn new( vb: candle_nn::VarBuilder, config: stable_diffusion::clip::Config, tokenizer_path: &str, max_position_embeddings: usize, ) -> Result<Self> { let clip = stable_diffusion::clip::ClipTextTransformer::new(vb, &config)?; let path_buf = hf_hub::api::sync::Api::new()? .model(tokenizer_path.to_string()) .get("tokenizer.json")?; let tokenizer = Tokenizer::from_file(path_buf.to_str().ok_or(E::msg( "Failed to serialize huggingface PathBuf of CLIP tokenizer", ))?) .map_err(E::msg)?; Ok(Self { clip, config, tokenizer, max_position_embeddings, }) } fn encode_text_to_embedding( &self, prompt: &str, device: &candle::Device, ) -> Result<(Tensor, Tensor)> { let pad_id = match &self.config.pad_with { Some(padding) => *self .tokenizer .get_vocab(true) .get(padding.as_str()) .ok_or(E::msg("Failed to tokenize CLIP padding."))?, None => *self .tokenizer .get_vocab(true) .get("<|endoftext|>") .ok_or(E::msg("Failed to tokenize CLIP end-of-text."))?, }; let mut tokens = self .tokenizer .encode(prompt, true) .map_err(E::msg)? .get_ids() .to_vec(); let eos_position = tokens.len() - 1; while tokens.len() < self.max_position_embeddings { tokens.push(pad_id) } let tokens = Tensor::new(tokens.as_slice(), device)?.unsqueeze(0)?; let (text_embeddings, text_embeddings_penultimate) = self .clip .forward_until_encoder_layer(&tokens, usize::MAX, -2)?; let text_embeddings_pooled = text_embeddings.i((0, eos_position, ..))?; Ok((text_embeddings_penultimate, text_embeddings_pooled)) } } struct T5WithTokenizer { t5: t5::T5EncoderModel, tokenizer: Tokenizer, max_position_embeddings: usize, } impl T5WithTokenizer { fn new(vb: candle_nn::VarBuilder, max_position_embeddings: usize) -> Result<Self> { let api = hf_hub::api::sync::Api::new()?; let repo = api.repo(hf_hub::Repo::with_revision( "google/t5-v1_1-xxl".to_string(), hf_hub::RepoType::Model, "refs/pr/2".to_string(), )); let config_filename = repo.get("config.json")?; let config = std::fs::read_to_string(config_filename)?; let config: t5::Config = serde_json::from_str(&config)?; let model = t5::T5EncoderModel::load(vb, &config)?; let tokenizer_filename = api .model("lmz/mt5-tokenizers".to_string()) .get("t5-v1_1-xxl.tokenizer.json")?; let tokenizer = Tokenizer::from_file(tokenizer_filename).map_err(E::msg)?; Ok(Self { t5: model, tokenizer, max_position_embeddings, }) } fn encode_text_to_embedding( &mut self, prompt: &str, device: &candle::Device, ) -> Result<Tensor> { let mut tokens = self .tokenizer .encode(prompt, true) .map_err(E::msg)? 
.get_ids() .to_vec(); tokens.resize(self.max_position_embeddings, 0); let input_token_ids = Tensor::new(&tokens[..], device)?.unsqueeze(0)?; let embeddings = self.t5.forward_dt(&input_token_ids, Some(DType::F32))?; Ok(embeddings) } } pub struct StableDiffusion3TripleClipWithTokenizer { clip_l: ClipWithTokenizer, clip_g: ClipWithTokenizer, clip_g_text_projection: candle_nn::Linear, t5: T5WithTokenizer, } impl StableDiffusion3TripleClipWithTokenizer { pub fn new_split( clip_g_file: &PathBuf, clip_l_file: &PathBuf, t5xxl_file: &PathBuf, device: &candle::Device, ) -> Result<Self> { let vb_clip_g = unsafe { candle_nn::VarBuilder::from_mmaped_safetensors(&[clip_g_file], DType::F16, device)? }; let vb_clip_l = unsafe { candle_nn::VarBuilder::from_mmaped_safetensors(&[clip_l_file], DType::F16, device)? }; let vb_t5 = unsafe { candle_nn::VarBuilder::from_mmaped_safetensors(&[t5xxl_file], DType::F16, device)? }; let max_position_embeddings = 77usize; let clip_l = ClipWithTokenizer::new( vb_clip_l, stable_diffusion::clip::Config::sdxl(), "openai/clip-vit-large-patch14", max_position_embeddings, )?; let text_projection = candle_nn::linear_no_bias(1280, 1280, vb_clip_g.pp("text_projection"))?; let clip_g = ClipWithTokenizer::new( vb_clip_g, stable_diffusion::clip::Config::sdxl2(), "laion/CLIP-ViT-bigG-14-laion2B-39B-b160k", max_position_embeddings, )?; let t5 = T5WithTokenizer::new(vb_t5, max_position_embeddings)?; Ok(Self { clip_l, clip_g, clip_g_text_projection: text_projection, t5, }) } pub fn new(vb: candle_nn::VarBuilder) -> Result<Self> { let max_position_embeddings = 77usize; let clip_l = ClipWithTokenizer::new( vb.pp("clip_l.transformer"), stable_diffusion::clip::Config::sdxl(), "openai/clip-vit-large-patch14", max_position_embeddings, )?; let clip_g = ClipWithTokenizer::new( vb.pp("clip_g.transformer"), stable_diffusion::clip::Config::sdxl2(), "laion/CLIP-ViT-bigG-14-laion2B-39B-b160k", max_position_embeddings, )?; let text_projection = candle_nn::linear_no_bias(1280, 1280, vb.pp("clip_g.transformer.text_projection"))?; let t5 = T5WithTokenizer::new(vb.pp("t5xxl.transformer"), max_position_embeddings)?; Ok(Self { clip_l, clip_g, clip_g_text_projection: text_projection, t5, }) } pub fn encode_text_to_embedding( &mut self, prompt: &str, device: &candle::Device, ) -> Result<(Tensor, Tensor)> { let (clip_l_embeddings, clip_l_embeddings_pooled) = self.clip_l.encode_text_to_embedding(prompt, device)?; let (clip_g_embeddings, clip_g_embeddings_pooled) = self.clip_g.encode_text_to_embedding(prompt, device)?; let clip_g_embeddings_pooled = self .clip_g_text_projection .forward(&clip_g_embeddings_pooled.unsqueeze(0)?)? .squeeze(0)?; let y = Tensor::cat(&[&clip_l_embeddings_pooled, &clip_g_embeddings_pooled], 0)? .unsqueeze(0)?; let clip_embeddings_concat = Tensor::cat( &[&clip_l_embeddings, &clip_g_embeddings], D::Minus1, )? .pad_with_zeros(D::Minus1, 0, 2048)?; let t5_embeddings = self .t5 .encode_text_to_embedding(prompt, device)? .to_dtype(DType::F16)?; let context = Tensor::cat(&[&clip_embeddings_concat, &t5_embeddings], D::Minus2)?; Ok((context, y)) } }
candle/candle-examples/examples/stable-diffusion-3/clip.rs/0
{ "file_path": "candle/candle-examples/examples/stable-diffusion-3/clip.rs", "repo_id": "candle", "token_count": 4060 }
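`encode_text_to_embedding` above assembles the conditioning tensor by concatenating the two CLIP streams on the feature axis, zero-padding the result to the T5 width, and then stacking CLIP and T5 tokens along the sequence axis. A shape-only sketch of that plumbing, assuming the usual hidden sizes (768 for CLIP-L, 1280 for CLIP-G, 4096 for T5-XXL) and the `candle-core` crate (imported as plain `candle` inside the repo's own examples):

```rust
use candle_core::{DType, Device, Result, Tensor, D};

fn main() -> Result<()> {
    let dev = Device::Cpu;
    // Stand-in, all-zero embeddings: 77 tokens per text encoder.
    let clip_l = Tensor::zeros((1, 77, 768), DType::F32, &dev)?;
    let clip_g = Tensor::zeros((1, 77, 1280), DType::F32, &dev)?;
    let t5 = Tensor::zeros((1, 77, 4096), DType::F32, &dev)?;

    // Concatenate the CLIP streams on the feature axis: 768 + 1280 = 2048 ...
    let clip = Tensor::cat(&[&clip_l, &clip_g], D::Minus1)?;
    // ... then zero-pad on the right so the width matches T5: 2048 + 2048 = 4096.
    let clip = clip.pad_with_zeros(D::Minus1, 0, 2048)?;
    // Finally stack CLIP and T5 tokens along the sequence axis: 77 + 77 = 154 tokens.
    let context = Tensor::cat(&[&clip, &t5], D::Minus2)?;

    let (b, seq, hidden) = context.dims3()?;
    assert_eq!((b, seq, hidden), (1, 154, 4096));
    Ok(())
}
```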
use image::{DynamicImage, ImageBuffer}; use serde::Deserialize; use std::collections::HashMap; use candle::{DType, Device, Result, Tensor}; #[derive(Debug, Clone, PartialEq, Deserialize)] pub struct ProcessorConfig { do_resize: bool, height: u32, width: u32, do_rescale: bool, do_normalize: bool, image_mean: Vec<f32>, image_std: Vec<f32>, } impl Default for ProcessorConfig { fn default() -> Self { Self { do_resize: true, height: 384, width: 384, do_rescale: true, do_normalize: true, image_mean: vec![0.5, 0.5, 0.5], image_std: vec![0.5, 0.5, 0.5], } } } pub struct ViTImageProcessor { do_resize: bool, height: u32, width: u32, do_normalize: bool, image_mean: Vec<f32>, image_std: Vec<f32>, } impl ViTImageProcessor { pub fn new(config: &ProcessorConfig) -> Self { Self { do_resize: config.do_resize, height: config.height, width: config.width, do_normalize: config.do_normalize, image_mean: config.image_mean.clone(), image_std: config.image_std.clone(), } } pub fn preprocess(&self, images: Vec<&str>) -> Result<Tensor> { let height = self.height as usize; let width = self.width as usize; let channels = 3; let images = self.load_images(images)?; let resized_images: Vec<DynamicImage> = if self.do_resize { images .iter() .map(|image| self.resize(image.clone(), None).unwrap()) .collect() } else { images }; let normalized_images: Vec<Tensor> = if self.do_normalize { resized_images .iter() .map(|image| self.normalize(image.clone(), None, None).unwrap()) .collect() } else { let resized_images: Vec<ImageBuffer<image::Rgb<u8>, Vec<u8>>> = resized_images.iter().map(|image| image.to_rgb8()).collect(); let data = resized_images .into_iter() .map(|image| image.into_raw()) .collect::<Vec<Vec<u8>>>(); data.iter() .map(|image| { Tensor::from_vec(image.clone(), (height, width, channels), &Device::Cpu) .unwrap() .permute((2, 0, 1)) .unwrap() }) .collect::<Vec<Tensor>>() }; Tensor::stack(&normalized_images, 0) } fn resize( &self, image: image::DynamicImage, size: Option<HashMap<String, u32>>, ) -> Result<image::DynamicImage> { let (height, width) = match &size { Some(size) => (size.get("height").unwrap(), size.get("width").unwrap()), None => (&self.height, &self.width), }; let resized_image = image.resize_exact(*width, *height, image::imageops::FilterType::Triangle); Ok(resized_image) } fn normalize( &self, image: image::DynamicImage, mean: Option<Vec<f32>>, std: Option<Vec<f32>>, ) -> Result<Tensor> { let mean = match mean { Some(mean) => mean, None => self.image_mean.clone(), }; let std = match std { Some(std) => std, None => self.image_std.clone(), }; let mean = Tensor::from_vec(mean, (3, 1, 1), &Device::Cpu)?; let std = Tensor::from_vec(std, (3, 1, 1), &Device::Cpu)?; let image = image.to_rgb8(); let data = image.into_raw(); let height = self.height as usize; let width = self.width as usize; let channels = 3; let data = Tensor::from_vec(data, &[height, width, channels], &Device::Cpu)?.permute((2, 0, 1))?; (data.to_dtype(DType::F32)? / 255.)? .broadcast_sub(&mean)? .broadcast_div(&std) } pub fn load_images(&self, image_path: Vec<&str>) -> Result<Vec<image::DynamicImage>> { let mut images: Vec<image::DynamicImage> = Vec::new(); for path in image_path { let img = image::ImageReader::open(path)?.decode().unwrap(); images.push(img); } Ok(images) } }
candle/candle-examples/examples/trocr/image_processor.rs/0
{ "file_path": "candle/candle-examples/examples/trocr/image_processor.rs", "repo_id": "candle", "token_count": 2273 }
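The `normalize` path above rescales pixel values to `[0, 1]` and then applies the per-channel `(x - mean) / std` transform through broadcasting against `(3, 1, 1)` tensors. A small sketch of the same arithmetic on a fake 2x2 image, assuming the `candle-core` crate:

```rust
use candle_core::{DType, Device, Result, Tensor};

fn main() -> Result<()> {
    let dev = Device::Cpu;
    // A fake 2x2 RGB image already in channel-first (3, H, W) layout, values in 0..=255.
    let pixels: Vec<u8> = vec![
        0, 64, 128, 255, // R plane
        0, 64, 128, 255, // G plane
        0, 64, 128, 255, // B plane
    ];
    let img = Tensor::from_vec(pixels, (3, 2, 2), &dev)?.to_dtype(DType::F32)?;

    // Per-channel mean/std shaped (3, 1, 1) so they broadcast over H and W.
    let mean = Tensor::from_vec(vec![0.5f32, 0.5, 0.5], (3, 1, 1), &dev)?;
    let std = Tensor::from_vec(vec![0.5f32, 0.5, 0.5], (3, 1, 1), &dev)?;

    // Rescale to [0, 1], then normalize; with mean = std = 0.5 the result lies in [-1, 1].
    let scaled = (img / 255.)?;
    let normalized = scaled.broadcast_sub(&mean)?.broadcast_div(&std)?;
    println!("{normalized}");
    Ok(())
}
```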
# candle-wuerstchen: Efficient Pretraining of Text-to-Image Models

![anthropomorphic cat dressed as a fire fighter](./assets/cat.jpg)

The `wuerstchen` example is a port of the [diffusers implementation](https://github.com/huggingface/diffusers/tree/19edca82f1ff194c07317369a92b470dbae97f34/src/diffusers/pipelines/wuerstchen) for Würstchen v2. The candle implementation reproduces the same structure and files for models and pipelines.

Useful resources:

- [Official implementation](https://github.com/dome272/Wuerstchen).
- [arXiv paper](https://arxiv.org/abs/2306.00637).
- Blog post: [Introducing Würstchen: Fast Diffusion for Image Generation](https://huggingface.co/blog/wuerstchen).

## Getting the weights

The weights are automatically downloaded from the [HuggingFace Hub](https://huggingface.co/) on the first run. There are various command-line flags to use local files instead; run with `--help` to learn about them.

## Running the example

```bash
cargo run --example wuerstchen --release --features cuda,cudnn -- \
  --prompt "Anthropomorphic cat dressed as a fire fighter"
```

The final image is named `sd_final.png` by default.
candle/candle-examples/examples/wuerstchen/README.md/0
{ "file_path": "candle/candle-examples/examples/wuerstchen/README.md", "repo_id": "candle", "token_count": 358 }
use candle::{DType, IndexOp, Result, Tensor, D}; use candle_nn::{batch_norm, conv2d, conv2d_no_bias, Conv2d, Conv2dConfig, Module, VarBuilder}; #[derive(Clone, Copy, PartialEq, Debug)] pub struct Multiples { depth: f64, width: f64, ratio: f64, } impl Multiples { pub fn n() -> Self { Self { depth: 0.33, width: 0.25, ratio: 2.0, } } pub fn s() -> Self { Self { depth: 0.33, width: 0.50, ratio: 2.0, } } pub fn m() -> Self { Self { depth: 0.67, width: 0.75, ratio: 1.5, } } pub fn l() -> Self { Self { depth: 1.00, width: 1.00, ratio: 1.0, } } pub fn x() -> Self { Self { depth: 1.00, width: 1.25, ratio: 1.0, } } fn filters(&self) -> (usize, usize, usize) { let f1 = (256. * self.width) as usize; let f2 = (512. * self.width) as usize; let f3 = (512. * self.width * self.ratio) as usize; (f1, f2, f3) } } #[derive(Debug)] struct Upsample { scale_factor: usize, } impl Upsample { fn new(scale_factor: usize) -> Result<Self> { Ok(Upsample { scale_factor }) } } impl Module for Upsample { fn forward(&self, xs: &Tensor) -> candle::Result<Tensor> { let (_b_size, _channels, h, w) = xs.dims4()?; xs.upsample_nearest2d(self.scale_factor * h, self.scale_factor * w) } } #[derive(Debug)] struct ConvBlock { conv: Conv2d, span: tracing::Span, } impl ConvBlock { fn load( vb: VarBuilder, c1: usize, c2: usize, k: usize, stride: usize, padding: Option<usize>, ) -> Result<Self> { let padding = padding.unwrap_or(k / 2); let cfg = Conv2dConfig { padding, stride, groups: 1, dilation: 1, }; let bn = batch_norm(c2, 1e-3, vb.pp("bn"))?; let conv = conv2d_no_bias(c1, c2, k, cfg, vb.pp("conv"))?.absorb_bn(&bn)?; Ok(Self { conv, span: tracing::span!(tracing::Level::TRACE, "conv-block"), }) } } impl Module for ConvBlock { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let xs = self.conv.forward(xs)?; candle_nn::ops::silu(&xs) } } #[derive(Debug)] struct Bottleneck { cv1: ConvBlock, cv2: ConvBlock, residual: bool, span: tracing::Span, } impl Bottleneck { fn load(vb: VarBuilder, c1: usize, c2: usize, shortcut: bool) -> Result<Self> { let channel_factor = 1.; let c_ = (c2 as f64 * channel_factor) as usize; let cv1 = ConvBlock::load(vb.pp("cv1"), c1, c_, 3, 1, None)?; let cv2 = ConvBlock::load(vb.pp("cv2"), c_, c2, 3, 1, None)?; let residual = c1 == c2 && shortcut; Ok(Self { cv1, cv2, residual, span: tracing::span!(tracing::Level::TRACE, "bottleneck"), }) } } impl Module for Bottleneck { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let ys = self.cv2.forward(&self.cv1.forward(xs)?)?; if self.residual { xs + ys } else { Ok(ys) } } } #[derive(Debug)] struct C2f { cv1: ConvBlock, cv2: ConvBlock, bottleneck: Vec<Bottleneck>, span: tracing::Span, } impl C2f { fn load(vb: VarBuilder, c1: usize, c2: usize, n: usize, shortcut: bool) -> Result<Self> { let c = (c2 as f64 * 0.5) as usize; let cv1 = ConvBlock::load(vb.pp("cv1"), c1, 2 * c, 1, 1, None)?; let cv2 = ConvBlock::load(vb.pp("cv2"), (2 + n) * c, c2, 1, 1, None)?; let mut bottleneck = Vec::with_capacity(n); for idx in 0..n { let b = Bottleneck::load(vb.pp(format!("bottleneck.{idx}")), c, c, shortcut)?; bottleneck.push(b) } Ok(Self { cv1, cv2, bottleneck, span: tracing::span!(tracing::Level::TRACE, "c2f"), }) } } impl Module for C2f { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let ys = self.cv1.forward(xs)?; let mut ys = ys.chunk(2, 1)?; for m in self.bottleneck.iter() { ys.push(m.forward(ys.last().unwrap())?) 
} let zs = Tensor::cat(ys.as_slice(), 1)?; self.cv2.forward(&zs) } } #[derive(Debug)] struct Sppf { cv1: ConvBlock, cv2: ConvBlock, k: usize, span: tracing::Span, } impl Sppf { fn load(vb: VarBuilder, c1: usize, c2: usize, k: usize) -> Result<Self> { let c_ = c1 / 2; let cv1 = ConvBlock::load(vb.pp("cv1"), c1, c_, 1, 1, None)?; let cv2 = ConvBlock::load(vb.pp("cv2"), c_ * 4, c2, 1, 1, None)?; Ok(Self { cv1, cv2, k, span: tracing::span!(tracing::Level::TRACE, "sppf"), }) } } impl Module for Sppf { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let (_, _, _, _) = xs.dims4()?; let xs = self.cv1.forward(xs)?; let xs2 = xs .pad_with_zeros(2, self.k / 2, self.k / 2)? .pad_with_zeros(3, self.k / 2, self.k / 2)? .max_pool2d_with_stride(self.k, 1)?; let xs3 = xs2 .pad_with_zeros(2, self.k / 2, self.k / 2)? .pad_with_zeros(3, self.k / 2, self.k / 2)? .max_pool2d_with_stride(self.k, 1)?; let xs4 = xs3 .pad_with_zeros(2, self.k / 2, self.k / 2)? .pad_with_zeros(3, self.k / 2, self.k / 2)? .max_pool2d_with_stride(self.k, 1)?; self.cv2.forward(&Tensor::cat(&[&xs, &xs2, &xs3, &xs4], 1)?) } } #[derive(Debug)] struct Dfl { conv: Conv2d, num_classes: usize, span: tracing::Span, } impl Dfl { fn load(vb: VarBuilder, num_classes: usize) -> Result<Self> { let conv = conv2d_no_bias(num_classes, 1, 1, Default::default(), vb.pp("conv"))?; Ok(Self { conv, num_classes, span: tracing::span!(tracing::Level::TRACE, "dfl"), }) } } impl Module for Dfl { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let (b_sz, _channels, anchors) = xs.dims3()?; let xs = xs .reshape((b_sz, 4, self.num_classes, anchors))? .transpose(2, 1)?; let xs = candle_nn::ops::softmax(&xs, 1)?; self.conv.forward(&xs)?.reshape((b_sz, 4, anchors)) } } #[derive(Debug)] struct DarkNet { b1_0: ConvBlock, b1_1: ConvBlock, b2_0: C2f, b2_1: ConvBlock, b2_2: C2f, b3_0: ConvBlock, b3_1: C2f, b4_0: ConvBlock, b4_1: C2f, b5: Sppf, span: tracing::Span, } impl DarkNet { fn load(vb: VarBuilder, m: Multiples) -> Result<Self> { let (w, r, d) = (m.width, m.ratio, m.depth); let b1_0 = ConvBlock::load(vb.pp("b1.0"), 3, (64. * w) as usize, 3, 2, Some(1))?; let b1_1 = ConvBlock::load( vb.pp("b1.1"), (64. * w) as usize, (128. * w) as usize, 3, 2, Some(1), )?; let b2_0 = C2f::load( vb.pp("b2.0"), (128. * w) as usize, (128. * w) as usize, (3. * d).round() as usize, true, )?; let b2_1 = ConvBlock::load( vb.pp("b2.1"), (128. * w) as usize, (256. * w) as usize, 3, 2, Some(1), )?; let b2_2 = C2f::load( vb.pp("b2.2"), (256. * w) as usize, (256. * w) as usize, (6. * d).round() as usize, true, )?; let b3_0 = ConvBlock::load( vb.pp("b3.0"), (256. * w) as usize, (512. * w) as usize, 3, 2, Some(1), )?; let b3_1 = C2f::load( vb.pp("b3.1"), (512. * w) as usize, (512. * w) as usize, (6. * d).round() as usize, true, )?; let b4_0 = ConvBlock::load( vb.pp("b4.0"), (512. * w) as usize, (512. * w * r) as usize, 3, 2, Some(1), )?; let b4_1 = C2f::load( vb.pp("b4.1"), (512. * w * r) as usize, (512. * w * r) as usize, (3. * d).round() as usize, true, )?; let b5 = Sppf::load( vb.pp("b5.0"), (512. * w * r) as usize, (512. 
* w * r) as usize, 5, )?; Ok(Self { b1_0, b1_1, b2_0, b2_1, b2_2, b3_0, b3_1, b4_0, b4_1, b5, span: tracing::span!(tracing::Level::TRACE, "darknet"), }) } fn forward(&self, xs: &Tensor) -> Result<(Tensor, Tensor, Tensor)> { let _enter = self.span.enter(); let x1 = self.b1_1.forward(&self.b1_0.forward(xs)?)?; let x2 = self .b2_2 .forward(&self.b2_1.forward(&self.b2_0.forward(&x1)?)?)?; let x3 = self.b3_1.forward(&self.b3_0.forward(&x2)?)?; let x4 = self.b4_1.forward(&self.b4_0.forward(&x3)?)?; let x5 = self.b5.forward(&x4)?; Ok((x2, x3, x5)) } } #[derive(Debug)] struct YoloV8Neck { up: Upsample, n1: C2f, n2: C2f, n3: ConvBlock, n4: C2f, n5: ConvBlock, n6: C2f, span: tracing::Span, } impl YoloV8Neck { fn load(vb: VarBuilder, m: Multiples) -> Result<Self> { let up = Upsample::new(2)?; let (w, r, d) = (m.width, m.ratio, m.depth); let n = (3. * d).round() as usize; let n1 = C2f::load( vb.pp("n1"), (512. * w * (1. + r)) as usize, (512. * w) as usize, n, false, )?; let n2 = C2f::load( vb.pp("n2"), (768. * w) as usize, (256. * w) as usize, n, false, )?; let n3 = ConvBlock::load( vb.pp("n3"), (256. * w) as usize, (256. * w) as usize, 3, 2, Some(1), )?; let n4 = C2f::load( vb.pp("n4"), (768. * w) as usize, (512. * w) as usize, n, false, )?; let n5 = ConvBlock::load( vb.pp("n5"), (512. * w) as usize, (512. * w) as usize, 3, 2, Some(1), )?; let n6 = C2f::load( vb.pp("n6"), (512. * w * (1. + r)) as usize, (512. * w * r) as usize, n, false, )?; Ok(Self { up, n1, n2, n3, n4, n5, n6, span: tracing::span!(tracing::Level::TRACE, "neck"), }) } fn forward(&self, p3: &Tensor, p4: &Tensor, p5: &Tensor) -> Result<(Tensor, Tensor, Tensor)> { let _enter = self.span.enter(); let x = self .n1 .forward(&Tensor::cat(&[&self.up.forward(p5)?, p4], 1)?)?; let head_1 = self .n2 .forward(&Tensor::cat(&[&self.up.forward(&x)?, p3], 1)?)?; let head_2 = self .n4 .forward(&Tensor::cat(&[&self.n3.forward(&head_1)?, &x], 1)?)?; let head_3 = self .n6 .forward(&Tensor::cat(&[&self.n5.forward(&head_2)?, p5], 1)?)?; Ok((head_1, head_2, head_3)) } } #[derive(Debug)] struct DetectionHead { dfl: Dfl, cv2: [(ConvBlock, ConvBlock, Conv2d); 3], cv3: [(ConvBlock, ConvBlock, Conv2d); 3], ch: usize, no: usize, span: tracing::Span, } #[derive(Debug)] struct PoseHead { detect: DetectionHead, cv4: [(ConvBlock, ConvBlock, Conv2d); 3], kpt: (usize, usize), span: tracing::Span, } fn make_anchors( xs0: &Tensor, xs1: &Tensor, xs2: &Tensor, (s0, s1, s2): (usize, usize, usize), grid_cell_offset: f64, ) -> Result<(Tensor, Tensor)> { let dev = xs0.device(); let mut anchor_points = vec![]; let mut stride_tensor = vec![]; for (xs, stride) in [(xs0, s0), (xs1, s1), (xs2, s2)] { // xs is only used to extract the h and w dimensions. let (_, _, h, w) = xs.dims4()?; let sx = (Tensor::arange(0, w as u32, dev)?.to_dtype(DType::F32)? + grid_cell_offset)?; let sy = (Tensor::arange(0, h as u32, dev)?.to_dtype(DType::F32)? + grid_cell_offset)?; let sx = sx .reshape((1, sx.elem_count()))? .repeat((h, 1))? .flatten_all()?; let sy = sy .reshape((sy.elem_count(), 1))? .repeat((1, w))? .flatten_all()?; anchor_points.push(Tensor::stack(&[&sx, &sy], D::Minus1)?); stride_tensor.push((Tensor::ones(h * w, DType::F32, dev)? 
* stride as f64)?); } let anchor_points = Tensor::cat(anchor_points.as_slice(), 0)?; let stride_tensor = Tensor::cat(stride_tensor.as_slice(), 0)?.unsqueeze(1)?; Ok((anchor_points, stride_tensor)) } fn dist2bbox(distance: &Tensor, anchor_points: &Tensor) -> Result<Tensor> { let chunks = distance.chunk(2, 1)?; let lt = &chunks[0]; let rb = &chunks[1]; let x1y1 = anchor_points.sub(lt)?; let x2y2 = anchor_points.add(rb)?; let c_xy = ((&x1y1 + &x2y2)? * 0.5)?; let wh = (&x2y2 - &x1y1)?; Tensor::cat(&[c_xy, wh], 1) } struct DetectionHeadOut { pred: Tensor, anchors: Tensor, strides: Tensor, } impl DetectionHead { fn load(vb: VarBuilder, nc: usize, filters: (usize, usize, usize)) -> Result<Self> { let ch = 16; let dfl = Dfl::load(vb.pp("dfl"), ch)?; let c1 = usize::max(filters.0, nc); let c2 = usize::max(filters.0 / 4, ch * 4); let cv3 = [ Self::load_cv3(vb.pp("cv3.0"), c1, nc, filters.0)?, Self::load_cv3(vb.pp("cv3.1"), c1, nc, filters.1)?, Self::load_cv3(vb.pp("cv3.2"), c1, nc, filters.2)?, ]; let cv2 = [ Self::load_cv2(vb.pp("cv2.0"), c2, ch, filters.0)?, Self::load_cv2(vb.pp("cv2.1"), c2, ch, filters.1)?, Self::load_cv2(vb.pp("cv2.2"), c2, ch, filters.2)?, ]; let no = nc + ch * 4; Ok(Self { dfl, cv2, cv3, ch, no, span: tracing::span!(tracing::Level::TRACE, "detection-head"), }) } fn load_cv3( vb: VarBuilder, c1: usize, nc: usize, filter: usize, ) -> Result<(ConvBlock, ConvBlock, Conv2d)> { let block0 = ConvBlock::load(vb.pp("0"), filter, c1, 3, 1, None)?; let block1 = ConvBlock::load(vb.pp("1"), c1, c1, 3, 1, None)?; let conv = conv2d(c1, nc, 1, Default::default(), vb.pp("2"))?; Ok((block0, block1, conv)) } fn load_cv2( vb: VarBuilder, c2: usize, ch: usize, filter: usize, ) -> Result<(ConvBlock, ConvBlock, Conv2d)> { let block0 = ConvBlock::load(vb.pp("0"), filter, c2, 3, 1, None)?; let block1 = ConvBlock::load(vb.pp("1"), c2, c2, 3, 1, None)?; let conv = conv2d(c2, 4 * ch, 1, Default::default(), vb.pp("2"))?; Ok((block0, block1, conv)) } fn forward(&self, xs0: &Tensor, xs1: &Tensor, xs2: &Tensor) -> Result<DetectionHeadOut> { let _enter = self.span.enter(); let forward_cv = |xs, i: usize| { let xs_2 = self.cv2[i].0.forward(xs)?; let xs_2 = self.cv2[i].1.forward(&xs_2)?; let xs_2 = self.cv2[i].2.forward(&xs_2)?; let xs_3 = self.cv3[i].0.forward(xs)?; let xs_3 = self.cv3[i].1.forward(&xs_3)?; let xs_3 = self.cv3[i].2.forward(&xs_3)?; Tensor::cat(&[&xs_2, &xs_3], 1) }; let xs0 = forward_cv(xs0, 0)?; let xs1 = forward_cv(xs1, 1)?; let xs2 = forward_cv(xs2, 2)?; let (anchors, strides) = make_anchors(&xs0, &xs1, &xs2, (8, 16, 32), 0.5)?; let anchors = anchors.transpose(0, 1)?.unsqueeze(0)?; let strides = strides.transpose(0, 1)?; let reshape = |xs: &Tensor| { let d = xs.dim(0)?; let el = xs.elem_count(); xs.reshape((d, self.no, el / (d * self.no))) }; let ys0 = reshape(&xs0)?; let ys1 = reshape(&xs1)?; let ys2 = reshape(&xs2)?; let x_cat = Tensor::cat(&[ys0, ys1, ys2], 2)?; let box_ = x_cat.i((.., ..self.ch * 4))?; let cls = x_cat.i((.., self.ch * 4..))?; let dbox = dist2bbox(&self.dfl.forward(&box_)?, &anchors)?; let dbox = dbox.broadcast_mul(&strides)?; let pred = Tensor::cat(&[dbox, candle_nn::ops::sigmoid(&cls)?], 1)?; Ok(DetectionHeadOut { pred, anchors, strides, }) } } impl PoseHead { // kpt: keypoints, (17, 3) // nc: num-classes, 80 fn load( vb: VarBuilder, nc: usize, kpt: (usize, usize), filters: (usize, usize, usize), ) -> Result<Self> { let detect = DetectionHead::load(vb.clone(), nc, filters)?; let nk = kpt.0 * kpt.1; let c4 = usize::max(filters.0 / 4, nk); let cv4 = [ 
Self::load_cv4(vb.pp("cv4.0"), c4, nk, filters.0)?, Self::load_cv4(vb.pp("cv4.1"), c4, nk, filters.1)?, Self::load_cv4(vb.pp("cv4.2"), c4, nk, filters.2)?, ]; Ok(Self { detect, cv4, kpt, span: tracing::span!(tracing::Level::TRACE, "pose-head"), }) } fn load_cv4( vb: VarBuilder, c1: usize, nc: usize, filter: usize, ) -> Result<(ConvBlock, ConvBlock, Conv2d)> { let block0 = ConvBlock::load(vb.pp("0"), filter, c1, 3, 1, None)?; let block1 = ConvBlock::load(vb.pp("1"), c1, c1, 3, 1, None)?; let conv = conv2d(c1, nc, 1, Default::default(), vb.pp("2"))?; Ok((block0, block1, conv)) } fn forward(&self, xs0: &Tensor, xs1: &Tensor, xs2: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let d = self.detect.forward(xs0, xs1, xs2)?; let forward_cv = |xs: &Tensor, i: usize| { let (b_sz, _, h, w) = xs.dims4()?; let xs = self.cv4[i].0.forward(xs)?; let xs = self.cv4[i].1.forward(&xs)?; let xs = self.cv4[i].2.forward(&xs)?; xs.reshape((b_sz, self.kpt.0 * self.kpt.1, h * w)) }; let xs0 = forward_cv(xs0, 0)?; let xs1 = forward_cv(xs1, 1)?; let xs2 = forward_cv(xs2, 2)?; let xs = Tensor::cat(&[xs0, xs1, xs2], D::Minus1)?; let (b_sz, _nk, hw) = xs.dims3()?; let xs = xs.reshape((b_sz, self.kpt.0, self.kpt.1, hw))?; let ys01 = ((xs.i((.., .., 0..2))? * 2.)?.broadcast_add(&d.anchors)? - 0.5)? .broadcast_mul(&d.strides)?; let ys2 = candle_nn::ops::sigmoid(&xs.i((.., .., 2..3))?)?; let ys = Tensor::cat(&[ys01, ys2], 2)?.flatten(1, 2)?; Tensor::cat(&[d.pred, ys], 1) } } #[derive(Debug)] pub struct YoloV8 { net: DarkNet, fpn: YoloV8Neck, head: DetectionHead, span: tracing::Span, } impl YoloV8 { pub fn load(vb: VarBuilder, m: Multiples, num_classes: usize) -> Result<Self> { let net = DarkNet::load(vb.pp("net"), m)?; let fpn = YoloV8Neck::load(vb.pp("fpn"), m)?; let head = DetectionHead::load(vb.pp("head"), num_classes, m.filters())?; Ok(Self { net, fpn, head, span: tracing::span!(tracing::Level::TRACE, "yolo-v8"), }) } } impl Module for YoloV8 { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let (xs1, xs2, xs3) = self.net.forward(xs)?; let (xs1, xs2, xs3) = self.fpn.forward(&xs1, &xs2, &xs3)?; Ok(self.head.forward(&xs1, &xs2, &xs3)?.pred) } } #[derive(Debug)] pub struct YoloV8Pose { net: DarkNet, fpn: YoloV8Neck, head: PoseHead, span: tracing::Span, } impl YoloV8Pose { pub fn load( vb: VarBuilder, m: Multiples, num_classes: usize, kpt: (usize, usize), ) -> Result<Self> { let net = DarkNet::load(vb.pp("net"), m)?; let fpn = YoloV8Neck::load(vb.pp("fpn"), m)?; let head = PoseHead::load(vb.pp("head"), num_classes, kpt, m.filters())?; Ok(Self { net, fpn, head, span: tracing::span!(tracing::Level::TRACE, "yolo-v8-pose"), }) } } impl Module for YoloV8Pose { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let (xs1, xs2, xs3) = self.net.forward(xs)?; let (xs1, xs2, xs3) = self.fpn.forward(&xs1, &xs2, &xs3)?; self.head.forward(&xs1, &xs2, &xs3) } }
candle/candle-examples/examples/yolo-v8/model.rs/0
{ "file_path": "candle/candle-examples/examples/yolo-v8/model.rs", "repo_id": "candle", "token_count": 12422 }
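The detection head above predicts, for every anchor point, four distances (left, top, right, bottom) that `dist2bbox` converts into a center-x/center-y plus width/height box before scaling by the stride. A dependency-free sketch of that conversion for a single anchor, with made-up numbers:

```rust
// Scalar version of `dist2bbox` above: an anchor point plus (l, t, r, b) distances
// become (cx, cy, w, h) -- corners first, then center and size.
fn dist2bbox_single(anchor: (f32, f32), ltrb: (f32, f32, f32, f32)) -> (f32, f32, f32, f32) {
    let (ax, ay) = anchor;
    let (l, t, r, b) = ltrb;
    let (x1, y1) = (ax - l, ay - t); // top-left corner
    let (x2, y2) = (ax + r, ay + b); // bottom-right corner
    let (cx, cy) = ((x1 + x2) * 0.5, (y1 + y2) * 0.5);
    let (w, h) = (x2 - x1, y2 - y1);
    (cx, cy, w, h)
}

fn main() {
    // Anchor at grid position (10.5, 4.5) with predicted distances l=2, t=1, r=4, b=3.
    let (cx, cy, w, h) = dist2bbox_single((10.5, 4.5), (2.0, 1.0, 4.0, 3.0));
    assert_eq!((cx, cy, w, h), (11.5, 5.5, 6.0, 4.0));
    println!("cxcywh = ({cx}, {cy}, {w}, {h})");
}
```

In the real model these boxes are then multiplied by the stride of the feature map they came from, exactly as `dbox.broadcast_mul(&strides)` does in the detection head.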
/****************************************************************************** * Copyright (c) 2023, Tri Dao. ******************************************************************************/ #pragma once #include <cuda.h> #include <vector> // #include <ATen/cuda/CUDAGeneratorImpl.h> // For at::Generator and at::PhiloxCudaState constexpr int TOTAL_DIM = 0; constexpr int H_DIM = 1; constexpr int D_DIM = 2; //////////////////////////////////////////////////////////////////////////////////////////////////// struct Qkv_params { using index_t = int64_t; // The QKV matrices. void *__restrict__ q_ptr; void *__restrict__ k_ptr; void *__restrict__ v_ptr; // The stride between rows of the Q, K and V matrices. index_t q_batch_stride; index_t k_batch_stride; index_t v_batch_stride; index_t q_row_stride; index_t k_row_stride; index_t v_row_stride; index_t q_head_stride; index_t k_head_stride; index_t v_head_stride; // The number of heads. int h, h_k; // In the case of multi-query and grouped-query attention (MQA/GQA), nheads_k could be // different from nheads (query). int h_h_k_ratio; // precompute h / h_k, }; //////////////////////////////////////////////////////////////////////////////////////////////////// struct Flash_fwd_params : public Qkv_params { // The O matrix (output). void * __restrict__ o_ptr; void * __restrict__ oaccum_ptr; // The stride between rows of O. index_t o_batch_stride; index_t o_row_stride; index_t o_head_stride; // The pointer to the P matrix. void * __restrict__ p_ptr; // The pointer to the softmax sum. void * __restrict__ softmax_lse_ptr; void * __restrict__ softmax_lseaccum_ptr; // The dimensions. int b, seqlen_q, seqlen_k, seqlen_knew, d, seqlen_q_rounded, seqlen_k_rounded, d_rounded, rotary_dim, total_q; // The scaling factors for the kernel. float scale_softmax; float scale_softmax_log2; // array of length b+1 holding starting offset of each sequence. int * __restrict__ cu_seqlens_q; int * __restrict__ cu_seqlens_k; int * __restrict__ leftpad_k; // If provided, the actual length of each k sequence. int * __restrict__ seqused_k; int *__restrict__ blockmask; // The K_new and V_new matrices. void * __restrict__ knew_ptr; void * __restrict__ vnew_ptr; // The stride between rows of the Q, K and V matrices. index_t knew_batch_stride; index_t vnew_batch_stride; index_t knew_row_stride; index_t vnew_row_stride; index_t knew_head_stride; index_t vnew_head_stride; // The cos and sin matrices for rotary embedding. void * __restrict__ rotary_cos_ptr; void * __restrict__ rotary_sin_ptr; // The indices to index into the KV cache. int * __restrict__ cache_batch_idx; // Paged KV cache int * __restrict__ block_table; index_t block_table_batch_stride; int page_block_size; // The dropout probability (probability of keeping an activation). float p_dropout; // uint32_t p_dropout_in_uint; // uint16_t p_dropout_in_uint16_t; uint8_t p_dropout_in_uint8_t; // Scale factor of 1 / (1 - p_dropout). float rp_dropout; float scale_softmax_rp_dropout; // Local window size int window_size_left, window_size_right; float softcap; // Random state. // at::PhiloxCudaState philox_args; // Pointer to the RNG seed (idx 0) and offset (idx 1). uint64_t * rng_state; bool is_bf16; bool is_causal; // If is_seqlens_k_cumulative, then seqlen_k is cu_seqlens_k[bidb + 1] - cu_seqlens_k[bidb]. // Otherwise it's cu_seqlens_k[bidb], i.e., we use cu_seqlens_k to store the sequence lengths of K. 
bool is_seqlens_k_cumulative; bool is_rotary_interleaved; int num_splits; // For split-KV version void * __restrict__ alibi_slopes_ptr; index_t alibi_slopes_batch_stride; bool unpadded_lse; // For varlen paths: LSE is in [nheads, total_seqlen_q] format instead of [b, nheads, seqlen_q]. bool seqlenq_ngroups_swapped; // q has been transposed from (b, 1, (nheads_kv ngroups), d) to (b, ngroups, nheads_kv, d). }; //////////////////////////////////////////////////////////////////////////////////////////////////// struct Flash_bwd_params : public Flash_fwd_params { // The dO and dQKV matrices. void *__restrict__ do_ptr; void *__restrict__ dq_ptr; void *__restrict__ dk_ptr; void *__restrict__ dv_ptr; // To accumulate dQ void *__restrict__ dq_accum_ptr; void *__restrict__ dk_accum_ptr; void *__restrict__ dv_accum_ptr; // // To accumulate dK and dV in case we're splitting the bwd along seqlen_q // dimension void *__restrict__ dk_accum_ptr; void *__restrict__ // dv_accum_ptr; // The stride between rows of the dO, dQ, dK and dV matrices. // TD [2022-04-16]: We're using 32-bit indexing to save registers. // The code probably won't work for arrays larger than 2GB. index_t do_batch_stride; index_t do_row_stride; index_t do_head_stride; index_t dq_batch_stride; index_t dk_batch_stride; index_t dv_batch_stride; index_t dq_row_stride; index_t dk_row_stride; index_t dv_row_stride; index_t dq_head_stride; index_t dk_head_stride; index_t dv_head_stride; // The pointer to the softmax d sum. void *__restrict__ dsoftmax_sum; bool deterministic; index_t dq_accum_split_stride; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template<typename T, int Headdim, bool Is_causal> void run_mha_fwd_(Flash_fwd_params &params, cudaStream_t stream); // template<typename T, int Headdim, bool Is_causal> void run_mha_fwd_splitkv_dispatch(Flash_fwd_params &params, cudaStream_t stream); // template<typename T, int Headdim, bool Is_causal> void run_mha_bwd_(Flash_bwd_params &params, cudaStream_t stream);
candle/candle-flash-attn/kernels/flash.h/0
{ "file_path": "candle/candle-flash-attn/kernels/flash.h", "repo_id": "candle", "token_count": 2326 }
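The `Flash_fwd_params` fields above are populated from Rust by the wrapper in the following file, whose public entry point is the `flash_attn` function with a boolean causal flag (internally `window_size_left=None`, `window_size_right=Some(0)`). A minimal usage sketch, assuming a CUDA-capable device and a build that pulls in the `candle-core` and `candle-flash-attn` crates; shapes and values are arbitrary:

```rust
use candle_core::{DType, Device, Result, Tensor};

fn main() -> Result<()> {
    // The kernel is CUDA-only: the cpu_fwd implementation in the wrapper bails out.
    let device = Device::new_cuda(0)?;
    // head_size must be a multiple of 8 and at most 256.
    let (b, seq, heads, head_dim) = (1usize, 128usize, 8usize, 64usize);

    // Q/K/V in the (batch, seq_len, num_heads, head_size) layout the kernel expects, in f16.
    let q = Tensor::randn(0f32, 1., (b, seq, heads, head_dim), &device)?.to_dtype(DType::F16)?;
    let k = Tensor::randn(0f32, 1., (b, seq, heads, head_dim), &device)?.to_dtype(DType::F16)?;
    let v = Tensor::randn(0f32, 1., (b, seq, heads, head_dim), &device)?.to_dtype(DType::F16)?;

    let softmax_scale = 1f32 / (head_dim as f32).sqrt();
    // `true` applies the causal mask to Q @ K^T.
    let out = candle_flash_attn::flash_attn(&q, &k, &v, softmax_scale, true)?;
    println!("attention output shape: {:?}", out.dims());
    Ok(())
}
```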
mod ffi; use candle::backend::BackendStorage; use candle::cuda_backend::cudarc::driver::DevicePtr; use candle::cuda_backend::WrapErr; use candle::{CpuStorage, DType, Layout, Result, Shape, Tensor}; use half::{bf16, f16}; pub struct FlashAttn { pub softmax_scale: f32, pub alibi_slopes: Option<Tensor>, pub window_size_left: Option<usize>, pub window_size_right: Option<usize>, pub softcap: Option<f32>, } fn round_multiple(x: usize, m: usize) -> usize { (x + m - 1) / m * m } impl FlashAttn { fn cuda_fwd_t< T: candle::cuda_backend::CudaDType + candle::cuda_backend::cudarc::driver::DeviceRepr, >( &self, q: &candle::CudaStorage, q_l: &Layout, k: &candle::CudaStorage, k_l: &Layout, v: &candle::CudaStorage, v_l: &Layout, is_bf16: bool, ) -> Result<(candle::CudaStorage, Shape)> { // https://github.com/Dao-AILab/flash-attention/blob/b252072409e69c25f2b9d473cc534e49b24decd2/csrc/flash_attn/flash_api.cpp#L187 let dev = q.device(); let out_shape = q_l.shape().clone(); let out_l = Layout::contiguous(&out_shape); let q = q.as_cuda_slice::<T>()?; let k = k.as_cuda_slice::<T>()?; let v = v.as_cuda_slice::<T>()?; let q = q.slice(q_l.start_offset()..); let k = k.slice(k_l.start_offset()..); let v = v.slice(v_l.start_offset()..); let q_stride = q_l.stride(); let k_stride = k_l.stride(); let v_stride = v_l.stride(); let o_stride = out_l.stride(); let q_rank = q_stride.len(); let k_rank = k_stride.len(); let v_rank = v_stride.len(); let o_rank = o_stride.len(); if q_rank != 4 || k_rank != 4 || v_rank != 4 { candle::bail!( "flash-attn expects input tensors of rank 4 (q: {q_rank}, k: {k_rank}, v: {v_rank}" ) } if q_stride[q_rank - 1] != 1 { candle::bail!("the last dim of q must be contiguous {q_stride:?}") } if k_stride[k_rank - 1] != 1 { candle::bail!("the last dim of k must be contiguous {k_stride:?}") } if v_stride[v_rank - 1] != 1 { candle::bail!("the last dim of v must be contiguous {v_stride:?}") } let (b_sz, seqlen_q, num_heads, head_size_og) = q_l.shape().dims4()?; let (_b_sz, seqlen_k, num_heads_k, _head_size_og) = k_l.shape().dims4()?; let expected_kv = (b_sz, seqlen_k, num_heads_k, head_size_og); if expected_kv != k_l.shape().dims4()? { candle::bail!("shape mismatch q {:?} and k {:?}", q_l.shape(), k_l.shape()) } if expected_kv != v_l.shape().dims4()? { candle::bail!("shape mismatch q {:?} and v {:?}", q_l.shape(), v_l.shape()) } if head_size_og > 256 { candle::bail!("only supports head dimension at most 256 (got {head_size_og})") } if head_size_og % 8 != 0 { // TODO: Handle head sizes that are not a multiple of 8 via some padding. candle::bail!("only supports head sizes that are a multiple of 8 (got {head_size_og})") } if num_heads % num_heads_k != 0 { candle::bail!("number of k/v heads {num_heads_k} must divide number of heads in query {num_heads}") } let alibi_slopes_ptr = if let Some(alibi_slopes) = &self.alibi_slopes { if alibi_slopes.dtype() != DType::F32 { candle::bail!( "DType mismatch alibi_slopes {:?}, expected {:?}", alibi_slopes.dtype(), DType::F32 ); } let (alibi_slopes, alibi_slopes_layout) = alibi_slopes.storage_and_layout(); if num_heads != alibi_slopes_layout.shape().dims1()? 
{ candle::bail!( "shape mismatch alibi_slopes {:?}, expected {:?}", alibi_slopes_layout.shape(), (num_heads) ); } let alibi_slopes = match &*alibi_slopes { candle::Storage::Cuda(c) => c.as_cuda_slice::<f32>()?, _ => candle::bail!("alibi_slopes must be a cuda tensor"), }; let alibi_slopes = alibi_slopes.slice(alibi_slopes_layout.start_offset()..); *alibi_slopes.device_ptr() as *const core::ffi::c_void } else { std::ptr::null() }; // if window_size_left > self.max_seqlen_k or None => -1 let mut window_size_left = self .window_size_left .filter(|v| v <= &seqlen_k) .map(|v| v as i32) .unwrap_or(-1); // if window_size_right > self.max_seqlen_k or None => -1 let mut window_size_right = self .window_size_right .filter(|v| v <= &seqlen_k) .map(|v| v as i32) .unwrap_or(-1); let head_size = round_multiple(head_size_og, 8); let head_size_rounded = round_multiple(head_size, 32); let seqlen_q_rounded = round_multiple(seqlen_q, 128); let seqlen_k_rounded = round_multiple(seqlen_k, 128); let elem_count = out_shape.elem_count(); let dst = unsafe { dev.alloc::<T>(elem_count) }.w()?; let softmax_lse = dev .alloc_zeros::<f32>(b_sz * 128 * num_heads * seqlen_q) .w()?; let is_bf16 = if is_bf16 { 1 } else { 0 }; // Causal is the special case where window_size_right == 0 and window_size_left < 0. // Local is the more general case where window_size_right >= 0 or window_size_left >= 0. let is_causal = if window_size_left < 0 && window_size_right == 0 { 1 } else { 0 }; if window_size_left < 0 && window_size_right >= 0 { window_size_left = seqlen_k as i32; } if window_size_left >= 0 && window_size_right < 0 { window_size_right = seqlen_k as i32; } unsafe { let q_ptr = *q.device_ptr() as *const core::ffi::c_void; let k_ptr = *k.device_ptr() as *const core::ffi::c_void; let v_ptr = *v.device_ptr() as *const core::ffi::c_void; let dst_ptr = *dst.device_ptr() as *const core::ffi::c_void; let softmax_lse_ptr = *softmax_lse.device_ptr() as *const core::ffi::c_void; ffi::run_mha( q_ptr, k_ptr, v_ptr, dst_ptr, softmax_lse_ptr, /* alibi_slopes_ptr */ alibi_slopes_ptr, /* cu_seqlens_q_ptr */ std::ptr::null(), /* cu_seqlens_k_ptr */ std::ptr::null(), /* q_batch_stride */ q_stride[0] as u32, /* k_batch_stride */ k_stride[0] as u32, /* v_batch_stride */ v_stride[0] as u32, /* o_batch_stride */ o_stride[0] as u32, /* alibi_slopes_batch_stride */ 0, /* q_row_stride */ q_stride[q_rank - 3] as u32, /* k_row_stride */ k_stride[k_rank - 3] as u32, /* v_row_stride */ v_stride[v_rank - 3] as u32, /* o_row_stride */ o_stride[o_rank - 3] as u32, /* q_head_stride */ q_stride[q_rank - 2] as u32, /* k_head_stride */ k_stride[k_rank - 2] as u32, /* v_head_stride */ v_stride[v_rank - 2] as u32, /* o_head_stride */ o_stride[o_rank - 2] as u32, /* b */ b_sz as u32, /* h */ num_heads as u32, /* h_k */ num_heads_k as u32, /* d */ head_size as u32, /* d_rounded */ head_size_rounded as u32, /* softmax_scale*/ self.softmax_scale, /* seqlen_q */ seqlen_q as u32, /* seqlen_k */ seqlen_k as u32, /* seqlen_q_rounded */ seqlen_q_rounded as u32, /* seqlen_k_rounded */ seqlen_k_rounded as u32, /* is_bf16 */ is_bf16, /* is_causal */ is_causal, /* upadded_lse */ 0, /* window_size_left */ window_size_left, /* window_size_right */ window_size_right, /* softcap */ self.softcap.unwrap_or(0f32), ) } let dst = candle::CudaStorage::wrap_cuda_slice(dst, dev.clone()); Ok((dst, out_shape)) } } impl candle::CustomOp3 for FlashAttn { fn name(&self) -> &'static str { "flash-attn" } fn cpu_fwd( &self, _: &CpuStorage, _: &Layout, _: &CpuStorage, _: &Layout, _: &CpuStorage, 
_: &Layout, ) -> Result<(CpuStorage, Shape)> { candle::bail!("no cpu support for flash-attn") } fn cuda_fwd( &self, q: &candle::CudaStorage, q_l: &Layout, k: &candle::CudaStorage, k_l: &Layout, v: &candle::CudaStorage, v_l: &Layout, ) -> Result<(candle::CudaStorage, Shape)> { match q.dtype() { candle::DType::F16 => self.cuda_fwd_t::<f16>(q, q_l, k, k_l, v, v_l, false), candle::DType::BF16 => self.cuda_fwd_t::<bf16>(q, q_l, k, k_l, v, v_l, true), dt => candle::bail!("flash-attn is only supported for f16/bf16 ({dt:?})"), } } } /// Flash-attention v2 layer. /// /// This implements scaled dot-product attention, `softmax(Q @ K^T . softmax_scale) @ V`. /// Multi-query and grouped-query attention are supported by using tensors k and v with fewer heads /// than q, the number of heads in k and v has to be divisible by the number of heads in q. /// /// # Arguments /// /// * `q` - Query tensor with shape `(batch, seq_len_q, num_heads_q, head_size)`. /// * `k` - Key tensor with shape `(batch, seq_len_kv, num_heads_kv, head_size)`. /// * `v` - Value tensor with shape `(batch, seq_len_kv, num_heads_kv, head_size)`. /// /// The resulting tensor has dimensions `(batch, seq_len_q, num_heads_q, head_size)`. pub fn flash_attn( q: &Tensor, k: &Tensor, v: &Tensor, softmax_scale: f32, causal: bool, ) -> Result<Tensor> { let window_size_left = None; let window_size_right = if causal { Some(0) } else { None }; let op = FlashAttn { softmax_scale, alibi_slopes: None, window_size_left, window_size_right, softcap: None, }; q.apply_op3(k, v, op) } /// Flash-attention v2 layer. /// /// This implements scaled dot-product attention, `softmax(Q @ K^T . softmax_scale) @ V`. /// Multi-query and grouped-query attention are supported by using tensors k and v with fewer heads /// than q, the number of heads in k and v has to be divisible by the number of heads in q. /// /// # Arguments /// /// * `q` - Query tensor with shape `(batch, seq_len_q, num_heads_q, head_size)`. /// * `k` - Key tensor with shape `(batch, seq_len_kv, num_heads_kv, head_size)`. /// * `v` - Value tensor with shape `(batch, seq_len_kv, num_heads_kv, head_size)`. /// * `window_size_left` - Limit left attention to value tokens. /// * `window_size_right` - Limit right attention to value tokens. /// /// # Causal mask /// /// `window_size_left=None` with `window_size_right=Some(0)` applies a causal mask to the result /// of `Q @ K^T` /// /// The resulting tensor has dimensions `(batch, seq_len_q, num_heads_q, head_size)`. pub fn flash_attn_windowed( q: &Tensor, k: &Tensor, v: &Tensor, softmax_scale: f32, window_size_left: Option<usize>, window_size_right: Option<usize>, ) -> Result<Tensor> { let op = FlashAttn { softmax_scale, alibi_slopes: None, window_size_left, window_size_right, softcap: None, }; q.apply_op3(k, v, op) } /// Flash-attention v2 layer. /// /// This implements scaled dot-product attention, `softmax(Q @ K^T . softmax_scale) @ V`. /// Multi-query and grouped-query attention are supported by using tensors k and v with fewer heads /// than q, the number of heads in k and v has to be divisible by the number of heads in q. /// /// # Arguments /// /// * `q` - Query tensor with shape `(batch, seq_len_q, num_heads_q, head_size)`. /// * `k` - Key tensor with shape `(batch, seq_len_kv, num_heads_kv, head_size)`. /// * `v` - Value tensor with shape `(batch, seq_len_kv, num_heads_kv, head_size)`. /// * `alibi_slopes` - Alibi slopes tensor with shape `(num_heads_q)`. /// /// The resulting tensor has dimensions `(batch, seq_len_q, num_heads_q, head_size)`. 
pub fn flash_attn_alibi( q: &Tensor, k: &Tensor, v: &Tensor, alibi_slopes: &Tensor, softmax_scale: f32, causal: bool, ) -> Result<Tensor> { let window_size_left = None; let window_size_right = if causal { Some(0) } else { None }; let op = FlashAttn { softmax_scale, alibi_slopes: Some(alibi_slopes.clone()), window_size_left, window_size_right, softcap: None, }; q.apply_op3(k, v, op) } /// Flash-attention v2 layer. /// /// This implements scaled dot-product attention, `softmax(Q @ K^T . softmax_scale) @ V`. /// Multi-query and grouped-query attention are supported by using tensors k and v with fewer heads /// than q, the number of heads in k and v has to be divisible by the number of heads in q. /// /// # Arguments /// /// * `q` - Query tensor with shape `(batch, seq_len_q, num_heads_q, head_size)`. /// * `k` - Key tensor with shape `(batch, seq_len_kv, num_heads_kv, head_size)`. /// * `v` - Value tensor with shape `(batch, seq_len_kv, num_heads_kv, head_size)`. /// * `alibi_slopes` - Alibi slopes tensor with shape `(num_heads_q)`. /// * `window_size_left` - Limit left attention to value tokens. /// * `window_size_right` - Limit right attention to value tokens. /// /// # Causal mask /// /// `window_size_left=None` with `window_size_right=Some(0)` applies a causal mask to the result /// of `Q @ K^T` /// /// The resulting tensor has dimensions `(batch, seq_len_q, num_heads_q, head_size)`. pub fn flash_attn_alibi_windowed( q: &Tensor, k: &Tensor, v: &Tensor, alibi_slopes: &Tensor, softmax_scale: f32, window_size_left: Option<usize>, window_size_right: Option<usize>, ) -> Result<Tensor> { let op = FlashAttn { softmax_scale, alibi_slopes: Some(alibi_slopes.clone()), window_size_left, window_size_right, softcap: None, }; q.apply_op3(k, v, op) } /// Flash-attention v2 layer. /// /// This implements scaled dot-product attention, `softmax(Q @ K^T . softmax_scale) @ V`. /// Multi-query and grouped-query attention are supported by using tensors `k` and `v` with fewer heads /// than `q`. The number of heads in `k` and `v` must be divisible by the number of heads in `q`. /// /// # Arguments /// /// * `q` - Query tensor with shape `(batch, seq_len_q, num_heads_q, head_size)`. /// * `k` - Key tensor with shape `(batch, seq_len_kv, num_heads_kv, head_size)`. /// * `v` - Value tensor with shape `(batch, seq_len_kv, num_heads_kv, head_size)`. /// * `alibi_slopes` - Optional alibi slopes tensor with shape `(num_heads_q)`. /// * `softmax_scale` - Scaling factor for the softmax operation. /// * `window_size_left` - Optional limit on left attention to value tokens. /// * `window_size_right` - Optional limit on right attention to value tokens. /// * `softcap` - Gemma style softcap the attention logits before the softmax. /// /// # Causal Mask /// /// Setting `window_size_left=None` and `window_size_right=Some(0)` applies a causal mask to the result /// of `Q @ K^T`. /// /// # Returns /// /// The resulting tensor has dimensions `(batch, seq_len_q, num_heads_q, head_size)`. 
pub fn flash_attn_alibi_windowed_softcap( q: &Tensor, k: &Tensor, v: &Tensor, alibi_slopes: Option<&Tensor>, softmax_scale: f32, window_size_left: Option<usize>, window_size_right: Option<usize>, softcap: f32, ) -> Result<Tensor> { let op = FlashAttn { softmax_scale, alibi_slopes: alibi_slopes.cloned(), window_size_left, window_size_right, softcap: Some(softcap), }; q.apply_op3(k, v, op) } struct FlashAttnVarLen { pub softmax_scale: f32, pub max_seqlen_q: usize, pub max_seqlen_k: usize, pub seqlens_q: Tensor, pub seqlens_k: Tensor, pub alibi_slopes: Option<Tensor>, pub window_size_left: Option<usize>, pub window_size_right: Option<usize>, pub softcap: Option<f32>, } impl FlashAttnVarLen { fn cuda_fwd_t< T: candle::cuda_backend::CudaDType + candle::cuda_backend::cudarc::driver::DeviceRepr, >( &self, q: &candle::CudaStorage, q_l: &Layout, k: &candle::CudaStorage, k_l: &Layout, v: &candle::CudaStorage, v_l: &Layout, is_bf16: bool, ) -> Result<(candle::CudaStorage, Shape)> { // https://github.com/Dao-AILab/flash-attention/blob/184b992dcb2a0890adaa19eb9b541c3e4f9d2a08/csrc/flash_attn/flash_api.cpp#L327 let dev = q.device(); let out_shape = q_l.shape().clone(); let out_l = Layout::contiguous(&out_shape); let (seqlens_q, seqlens_q_layout) = self.seqlens_q.storage_and_layout(); let seqlens_q = match &*seqlens_q { candle::Storage::Cuda(c) => c.as_cuda_slice::<u32>()?, // Should be i32! _ => candle::bail!("seqlens_q must be a cuda tensor"), }; let seqlens_q = match seqlens_q_layout.contiguous_offsets() { Some((o1, o2)) => seqlens_q.slice(o1..o2), None => candle::bail!("seqlens_q has to be contiguous"), }; let (seqlens_k, seqlens_k_layout) = self.seqlens_k.storage_and_layout(); let seqlens_k = match &*seqlens_k { candle::Storage::Cuda(c) => c.as_cuda_slice::<u32>()?, // Should be i32! _ => candle::bail!("seqlens_k must be a cuda tensor"), }; let seqlens_k = match seqlens_k_layout.contiguous_offsets() { Some((o1, o2)) => seqlens_k.slice(o1..o2), None => candle::bail!("seqlens_k has to be contiguous"), }; let q = q.as_cuda_slice::<f16>()?; let k = k.as_cuda_slice::<f16>()?; let v = v.as_cuda_slice::<f16>()?; let q = q.slice(q_l.start_offset()..); let k = k.slice(k_l.start_offset()..); let v = v.slice(v_l.start_offset()..); let q_stride = q_l.stride(); let k_stride = k_l.stride(); let v_stride = v_l.stride(); let o_stride = out_l.stride(); let q_rank = q_stride.len(); let k_rank = k_stride.len(); let v_rank = v_stride.len(); let o_rank = o_stride.len(); if q_rank != 3 || k_rank != 3 || v_rank != 3 { candle::bail!( "flash-attn-varlen expects input tensors of rank 3 (q: {q_rank}, k: {k_rank}, v: {v_rank}" ) } if q_stride[q_rank - 1] != 1 { candle::bail!("the last dim of q must be contiguous {q_stride:?}") } if k_stride[k_rank - 1] != 1 { candle::bail!("the last dim of k must be contiguous {k_stride:?}") } if v_stride[v_rank - 1] != 1 { candle::bail!("the last dim of v must be contiguous {v_stride:?}") } let (total_q, num_heads, head_size_og) = q_l.shape().dims3()?; let (total_k, num_heads_k, _head_size_og) = k_l.shape().dims3()?; let expected_kv = (total_k, num_heads_k, head_size_og); if expected_kv != k_l.shape().dims3()? { candle::bail!("shape mismatch q {:?} and k {:?}", q_l.shape(), k_l.shape()) } if expected_kv != v_l.shape().dims3()? 
{ candle::bail!("shape mismatch q {:?} and v {:?}", q_l.shape(), v_l.shape()) } if head_size_og > 256 { candle::bail!("only supports head dimension at most 256 (got {head_size_og})") } if head_size_og % 8 != 0 { // TODO: Handle head sizes that are not a multiple of 8 via some padding. candle::bail!("only supports head sizes that are a multiple of 8 (got {head_size_og})") } if num_heads % num_heads_k != 0 { candle::bail!("number of k/v heads {num_heads_k} must divide number of heads in query {num_heads}") } let nseqlens_q = seqlens_q_layout.shape().dims1()?; if nseqlens_q < 2 { candle::bail!("seqlens_q should have a len >= 2 {nseqlens_q}") } let nseqlens_k = seqlens_k_layout.shape().dims1()?; if nseqlens_k != nseqlens_q { candle::bail!("seqlens_q and seqlens_k should have the same number of elements {nseqlens_q} <> {nseqlens_k}") } let batch_size = nseqlens_q - 1; let alibi_slopes_ptr = if let Some(alibi_slopes) = &self.alibi_slopes { if alibi_slopes.dtype() != DType::F32 { candle::bail!( "DType mismatch alibi_slopes {:?}, expected {:?}", alibi_slopes.dtype(), DType::F32 ); } let (alibi_slopes, alibi_slopes_layout) = alibi_slopes.storage_and_layout(); if num_heads != alibi_slopes_layout.shape().dims1()? { candle::bail!( "shape mismatch alibi_slopes {:?}, expected {:?}", alibi_slopes_layout.shape(), (num_heads) ); } let alibi_slopes = match &*alibi_slopes { candle::Storage::Cuda(c) => c.as_cuda_slice::<f32>()?, _ => candle::bail!("alibi_slopes must be a cuda tensor"), }; let alibi_slopes = alibi_slopes.slice(alibi_slopes_layout.start_offset()..); *alibi_slopes.device_ptr() as *const core::ffi::c_void } else { std::ptr::null() }; // if window_size_left > self.max_seqlen_k or None => -1 let mut window_size_left = self .window_size_left .filter(|v| v <= &self.max_seqlen_k) .map(|v| v as i32) .unwrap_or(-1); // if window_size_right > self.max_seqlen_k or None => -1 let mut window_size_right = self .window_size_right .filter(|v| v <= &self.max_seqlen_k) .map(|v| v as i32) .unwrap_or(-1); let head_size = round_multiple(head_size_og, 8); let head_size_rounded = round_multiple(head_size, 32); let seqlen_q_rounded = round_multiple(self.max_seqlen_q, 128); let seqlen_k_rounded = round_multiple(self.max_seqlen_k, 128); let elem_count = out_shape.elem_count(); let dst = unsafe { dev.alloc::<f16>(elem_count) }.w()?; let softmax_lse = dev.alloc_zeros::<f32>(num_heads * total_q).w()?; let is_bf16 = if is_bf16 { 1 } else { 0 }; // Causal is the special case where window_size_right == 0 and window_size_left < 0. // Local is the more general case where window_size_right >= 0 or window_size_left >= 0. 
let is_causal = if window_size_left < 0 && window_size_right == 0 { 1 } else { 0 }; if window_size_left < 0 && window_size_right >= 0 { window_size_left = self.max_seqlen_k as i32; } if window_size_left >= 0 && window_size_right < 0 { window_size_right = self.max_seqlen_k as i32; } unsafe { let q_ptr = *q.device_ptr() as *const core::ffi::c_void; let k_ptr = *k.device_ptr() as *const core::ffi::c_void; let v_ptr = *v.device_ptr() as *const core::ffi::c_void; let dst_ptr = *dst.device_ptr() as *const core::ffi::c_void; let softmax_lse_ptr = *softmax_lse.device_ptr() as *const core::ffi::c_void; let seqlens_q_ptr = *seqlens_q.device_ptr() as *const core::ffi::c_int; let seqlens_k_ptr = *seqlens_k.device_ptr() as *const core::ffi::c_int; ffi::run_mha( q_ptr, k_ptr, v_ptr, dst_ptr, softmax_lse_ptr, /* alibi_slopes_ptr */ alibi_slopes_ptr, /* cu_seqlens_q_ptr */ seqlens_q_ptr, /* cu_seqlens_k_ptr */ seqlens_k_ptr, /* q_batch_stride */ 0, /* k_batch_stride */ 0, /* v_batch_stride */ 0, /* o_batch_stride */ 0, /* alibi_slopes_batch_stride */ 0, /* q_row_stride */ q_stride[q_rank - 3] as u32, /* k_row_stride */ k_stride[k_rank - 3] as u32, /* v_row_stride */ v_stride[v_rank - 3] as u32, /* o_row_stride */ o_stride[o_rank - 3] as u32, /* q_head_stride */ q_stride[q_rank - 2] as u32, /* k_head_stride */ k_stride[k_rank - 2] as u32, /* v_head_stride */ v_stride[v_rank - 2] as u32, /* o_head_stride */ o_stride[o_rank - 2] as u32, /* b */ batch_size as u32, /* h */ num_heads as u32, /* h_k */ num_heads_k as u32, /* d */ head_size as u32, /* d_rounded */ head_size_rounded as u32, /* softmax_scale*/ self.softmax_scale, /* seqlen_q */ self.max_seqlen_q as u32, /* seqlen_k */ self.max_seqlen_k as u32, /* seqlen_q_rounded */ seqlen_q_rounded as u32, /* seqlen_k_rounded */ seqlen_k_rounded as u32, /* is_bf16 */ is_bf16, /* is_causal */ is_causal, /* upadded_lse */ 1, /* window_size_left */ window_size_left, /* window_size_right */ window_size_right, /* softcap */ self.softcap.unwrap_or(0.0), ) } let dst = candle::CudaStorage::wrap_cuda_slice(dst, dev.clone()); Ok((dst, out_shape)) } } impl candle::CustomOp3 for FlashAttnVarLen { fn name(&self) -> &'static str { "flash-attn-varlen" } fn cpu_fwd( &self, _: &CpuStorage, _: &Layout, _: &CpuStorage, _: &Layout, _: &CpuStorage, _: &Layout, ) -> Result<(CpuStorage, Shape)> { candle::bail!("no cpu support for flash-attn") } fn cuda_fwd( &self, q: &candle::CudaStorage, q_l: &Layout, k: &candle::CudaStorage, k_l: &Layout, v: &candle::CudaStorage, v_l: &Layout, ) -> Result<(candle::CudaStorage, Shape)> { match q.dtype() { candle::DType::F16 => self.cuda_fwd_t::<f16>(q, q_l, k, k_l, v, v_l, false), candle::DType::BF16 => self.cuda_fwd_t::<bf16>(q, q_l, k, k_l, v, v_l, true), dt => candle::bail!("flash-attn is only supported for f16/bf16 ({dt:?})"), } } } #[allow(clippy::too_many_arguments)] /// Flash-attention v2 layer with variable-length batching. /// /// This implements scaled dot-product attention, `softmax(Q @ K^T . softmax_scale) @ V`. /// Multi-query and grouped-query attention are supported by using tensors k and v with fewer heads /// than q, the number of heads in k and v has to be divisible by the number of heads in q. /// /// # Arguments /// /// * `q` - Query tensor with shape `(total_q, num_heads_q, head_size)`. /// * `k` - Key tensor with shape `(total_kv, num_heads_kv, head_size)`. /// * `v` - Value tensor with shape `(total_kv, num_heads_kv, head_size)`. /// * `seqlens_q` - The cumulative lengths of the sequences in the batch, used to index in q. 
/// * `seqlens_k` - The cumulative lengths of the sequences in the batch, used to index in k and v. /// * `max_seqlen_q` - The maximum query sequence length for q in the batch. /// * `max_seqlen_k` - The maximum query sequence length for k and v in the batch. /// /// `seqlens_q` and `seqlens_k` contain `batch_size + 1` elements, typically `0`, `seqlen_1`, /// `seqlen_1 + seqlen_2`, etc. /// /// The resulting tensor has dimensions `(total_q, num_heads_q, head_size)`. pub fn flash_attn_varlen( q: &Tensor, k: &Tensor, v: &Tensor, seqlens_q: &Tensor, seqlens_k: &Tensor, max_seqlen_q: usize, max_seqlen_k: usize, softmax_scale: f32, causal: bool, ) -> Result<Tensor> { let window_size_left = None; let window_size_right = if causal { Some(0) } else { None }; let op = FlashAttnVarLen { softmax_scale, max_seqlen_q, max_seqlen_k, seqlens_q: seqlens_q.clone(), seqlens_k: seqlens_k.clone(), alibi_slopes: None, window_size_left, window_size_right, softcap: None, }; q.apply_op3(k, v, op) } #[allow(clippy::too_many_arguments)] /// Flash-attention v2 layer with variable-length batching. /// /// This implements scaled dot-product attention, `softmax(Q @ K^T . softmax_scale) @ V`. /// Multi-query and grouped-query attention are supported by using tensors k and v with fewer heads /// than q, the number of heads in k and v has to be divisible by the number of heads in q. /// /// # Arguments /// /// * `q` - Query tensor with shape `(total_q, num_heads_q, head_size)`. /// * `k` - Key tensor with shape `(total_kv, num_heads_kv, head_size)`. /// * `v` - Value tensor with shape `(total_kv, num_heads_kv, head_size)`. /// * `seqlens_q` - The cumulative lengths of the sequences in the batch, used to index in q. /// * `seqlens_k` - The cumulative lengths of the sequences in the batch, used to index in k and v. /// * `max_seqlen_q` - The maximum query sequence length for q in the batch. /// * `max_seqlen_k` - The maximum query sequence length for k and v in the batch. /// * `window_size_left` - Limit left attention to value tokens. /// * `window_size_right` - Limit right attention to value tokens. /// /// `seqlens_q` and `seqlens_k` contain `batch_size + 1` elements, typically `0`, `seqlen_1`, /// `seqlen_1 + seqlen_2`, etc. /// /// The resulting tensor has dimensions `(total_q, num_heads_q, head_size)`. /// /// # Causal mask /// /// `window_size_left=None` with `window_size_right=Some(0)` applies a causal mask to the result /// of `Q @ K^T` pub fn flash_attn_varlen_windowed( q: &Tensor, k: &Tensor, v: &Tensor, seqlens_q: &Tensor, seqlens_k: &Tensor, max_seqlen_q: usize, max_seqlen_k: usize, softmax_scale: f32, window_size_left: Option<usize>, window_size_right: Option<usize>, ) -> Result<Tensor> { let op = FlashAttnVarLen { softmax_scale, max_seqlen_q, max_seqlen_k, seqlens_q: seqlens_q.clone(), seqlens_k: seqlens_k.clone(), alibi_slopes: None, window_size_left, window_size_right, softcap: None, }; q.apply_op3(k, v, op) } #[allow(clippy::too_many_arguments)] /// Flash-attention v2 layer with variable-length batching. /// /// This implements scaled dot-product attention, `softmax(Q @ K^T . softmax_scale) @ V`. /// Multi-query and grouped-query attention are supported by using tensors k and v with fewer heads /// than q, the number of heads in k and v has to be divisible by the number of heads in q. /// /// # Arguments /// /// * `q` - Query tensor with shape `(total_q, num_heads_q, head_size)`. /// * `k` - Key tensor with shape `(total_kv, num_heads_kv, head_size)`. 
/// * `v` - Value tensor with shape `(total_kv, num_heads_kv, head_size)`. /// * `alibi_slopes` - Alibi slopes tensor with shape `(num_heads_q)`. /// * `seqlens_q` - The cumulative lengths of the sequences in the batch, used to index in q. /// * `seqlens_k` - The cumulative lengths of the sequences in the batch, used to index in k and v. /// * `max_seqlen_q` - The maximum query sequence length for q in the batch. /// * `max_seqlen_k` - The maximum query sequence length for k and v in the batch. /// /// `seqlens_q` and `seqlens_k` contain `batch_size + 1` elements, typically `0`, `seqlen_1`, /// `seqlen_1 + seqlen_2`, etc. /// /// The resulting tensor has dimensions `(total_q, num_heads_q, head_size)`. pub fn flash_attn_varlen_alibi( q: &Tensor, k: &Tensor, v: &Tensor, alibi_slopes: &Tensor, seqlens_q: &Tensor, seqlens_k: &Tensor, max_seqlen_q: usize, max_seqlen_k: usize, softmax_scale: f32, causal: bool, ) -> Result<Tensor> { let window_size_left = None; let window_size_right = if causal { Some(0) } else { None }; let op = FlashAttnVarLen { softmax_scale, max_seqlen_q, max_seqlen_k, seqlens_q: seqlens_q.clone(), seqlens_k: seqlens_k.clone(), alibi_slopes: Some(alibi_slopes.clone()), window_size_left, window_size_right, softcap: None, }; q.apply_op3(k, v, op) } #[allow(clippy::too_many_arguments)] /// Flash-attention v2 layer with variable-length batching. /// /// This implements scaled dot-product attention, `softmax(Q @ K^T . softmax_scale) @ V`. /// Multi-query and grouped-query attention are supported by using tensors k and v with fewer heads /// than q, the number of heads in k and v has to be divisible by the number of heads in q. /// /// # Arguments /// /// * `q` - Query tensor with shape `(total_q, num_heads_q, head_size)`. /// * `k` - Key tensor with shape `(total_kv, num_heads_kv, head_size)`. /// * `v` - Value tensor with shape `(total_kv, num_heads_kv, head_size)`. /// * `alibi_slopes` - Alibi slopes tensor with shape `(num_heads_q)`. /// * `seqlens_q` - The cumulative lengths of the sequences in the batch, used to index in q. /// * `seqlens_k` - The cumulative lengths of the sequences in the batch, used to index in k and v. /// * `max_seqlen_q` - The maximum query sequence length for q in the batch. /// * `max_seqlen_k` - The maximum query sequence length for k and v in the batch. /// * `window_size_left` - Limit left attention to value tokens. /// * `window_size_right` - Limit right attention to value tokens. /// /// `seqlens_q` and `seqlens_k` contain `batch_size + 1` elements, typically `0`, `seqlen_1`, /// `seqlen_1 + seqlen_2`, etc. /// /// The resulting tensor has dimensions `(total_q, num_heads_q, head_size)`. /// /// # Causal mask /// /// `window_size_left=None` with `window_size_right=Some(0)` applies a causal mask to the result /// of `Q @ K^T` pub fn flash_attn_varlen_alibi_windowed( q: &Tensor, k: &Tensor, v: &Tensor, alibi_slopes: &Tensor, seqlens_q: &Tensor, seqlens_k: &Tensor, max_seqlen_q: usize, max_seqlen_k: usize, softmax_scale: f32, window_size_left: Option<usize>, window_size_right: Option<usize>, ) -> Result<Tensor> { let op = FlashAttnVarLen { softmax_scale, max_seqlen_q, max_seqlen_k, seqlens_q: seqlens_q.clone(), seqlens_k: seqlens_k.clone(), alibi_slopes: Some(alibi_slopes.clone()), window_size_left, window_size_right, softcap: None, }; q.apply_op3(k, v, op) } #[allow(clippy::too_many_arguments)] /// Flash-attention v2 layer with variable-length batching. /// /// This implements scaled dot-product attention, `softmax(Q @ K^T . softmax_scale) @ V`. 
/// Multi-query and grouped-query attention are supported by using tensors k and v with fewer heads /// than q, the number of heads in k and v has to be divisible by the number of heads in q. /// /// # Arguments /// /// * `q` - Query tensor with shape `(total_q, num_heads_q, head_size)`. /// * `k` - Key tensor with shape `(total_kv, num_heads_kv, head_size)`. /// * `v` - Value tensor with shape `(total_kv, num_heads_kv, head_size)`. /// * `alibi_slopes` - Option, alibi slopes tensor with shape `(num_heads_q)`. /// * `seqlens_q` - The cumulative lengths of the sequences in the batch, used to index in q. /// * `seqlens_k` - The cumulative lengths of the sequences in the batch, used to index in k and v. /// * `max_seqlen_q` - The maximum query sequence length for q in the batch. /// * `max_seqlen_k` - The maximum query sequence length for k and v in the batch. /// * `window_size_left` - Option, limit left attention to value tokens. /// * `window_size_right` - Option, limit right attention to value tokens. /// * `softcap` - Gemma style softcap the attention logits before the softmax. /// /// `seqlens_q` and `seqlens_k` contain `batch_size + 1` elements, typically `0`, `seqlen_1`, /// `seqlen_1 + seqlen_2`, etc. /// /// The resulting tensor has dimensions `(total_q, num_heads_q, head_size)`. /// /// # Causal mask /// /// `window_size_left=None` with `window_size_right=Some(0)` applies a causal mask to the result /// of `Q @ K^T` pub fn flash_attn_varlen_alibi_windowed_softcap( q: &Tensor, k: &Tensor, v: &Tensor, alibi_slopes: Option<&Tensor>, seqlens_q: &Tensor, seqlens_k: &Tensor, max_seqlen_q: usize, max_seqlen_k: usize, softmax_scale: f32, window_size_left: Option<usize>, window_size_right: Option<usize>, softcap: f32, ) -> Result<Tensor> { let op = FlashAttnVarLen { softmax_scale, max_seqlen_q, max_seqlen_k, seqlens_q: seqlens_q.clone(), seqlens_k: seqlens_k.clone(), alibi_slopes: alibi_slopes.cloned(), window_size_left, window_size_right, softcap: Some(softcap), }; q.apply_op3(k, v, op) }
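// ---------------------------------------------------------------------------
// Hedged usage sketch (not part of the original file): a minimal test showing
// the tensor shapes `flash_attn` expects per the doc comments above, assuming
// a CUDA device is available and using the public `candle` API. Treat it as
// an illustration, not as the crate's own test suite.
#[cfg(test)]
mod usage_sketch {
    use candle::{DType, Device, Tensor};

    #[test]
    fn flash_attn_shapes() -> candle::Result<()> {
        let device = Device::new_cuda(0)?;
        let (b, seq, heads, head_dim) = (2usize, 16usize, 8usize, 64usize);
        // q/k/v all have shape (batch, seq_len, num_heads, head_size) and must
        // be f16 or bf16 for the CUDA kernel.
        let q = Tensor::randn(0f32, 1f32, (b, seq, heads, head_dim), &device)?.to_dtype(DType::F16)?;
        let k = Tensor::randn(0f32, 1f32, (b, seq, heads, head_dim), &device)?.to_dtype(DType::F16)?;
        let v = Tensor::randn(0f32, 1f32, (b, seq, heads, head_dim), &device)?.to_dtype(DType::F16)?;
        // The usual attention scaling factor.
        let scale = 1f32 / (head_dim as f32).sqrt();
        // Causal self-attention; the output has the same shape as q.
        let out = crate::flash_attn(&q, &k, &v, scale, true)?;
        assert_eq!(out.dims(), &[b, seq, heads, head_dim]);
        Ok(())
    }
}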
candle/candle-flash-attn/src/lib.rs/0
{ "file_path": "candle/candle-flash-attn/src/lib.rs", "repo_id": "candle", "token_count": 17752 }
#include "cuda_utils.cuh" #include <cmath> #include <stdint.h> #define WARP_SIZE 32 const int BLOCK_SIZE = 1024; // TODO: Maybe add some fast_sum_f16_f32 variant that not only accumulate in f32 // but also expect a f32 output so that this can be used for normalization e.g. // in softmax. // Fast reduce sum kernel, this assumes that the dimensions to loop over are at // the end, each block is responsible for populating one value in the output // array. There are at most 1024 threads per block. template <typename T> __device__ void fast_sum(const size_t src_numel, const size_t el_to_sum_per_block, const size_t num_dims, const size_t *info, const T *src, T *dst) { const size_t *dims = info; const size_t *strides = info + num_dims; __shared__ T shr[BLOCK_SIZE]; size_t tid = threadIdx.x; size_t dst_id = blockIdx.x; shr[tid] = 0; // Elements summed in this block range from dst_id * el_to_sum_per_block // to (dst_id + 1) * el_to_sum_per_block. size_t start_idx = dst_id * el_to_sum_per_block; size_t stop_idx = min(start_idx + el_to_sum_per_block, src_numel); size_t idx = start_idx + tid; while (idx < stop_idx) { // TODO: Fast version for the contiguous case. size_t strided_i = get_strided_index(idx, num_dims, dims, strides); shr[tid] += src[strided_i]; idx += blockDim.x; } // Parallel reduction, see the slides: // https://www.olcf.ornl.gov/wp-content/uploads/2019/12/05_Atomics_Reductions_Warp_Shuffle.pdf // https://stackoverflow.com/questions/66078814/is-cuda-atomicadd-operation-faster-than-launch-another-kernel-when-we-do-reduce for (int s = blockDim.x / 2; s > 0; s >>= 1) { __syncthreads(); if (tid < s) shr[tid] += shr[tid + s]; } if (tid == 0) dst[dst_id] = shr[0]; } static __device__ __forceinline__ float2 warp_reduce_sum(float2 a) { #pragma unroll for (int mask = 16; mask > 0; mask >>= 1) { a.x += __shfl_xor_sync(0xffffffff, a.x, mask, 32); a.y += __shfl_xor_sync(0xffffffff, a.y, mask, 32); } return a; } static __device__ __forceinline__ float warp_reduce_sum(float x) { #pragma unroll for (int mask = 16; mask > 0; mask >>= 1) { x += __shfl_xor_sync(0xffffffff, x, mask, 32); } return x; } // LayerNorm implementation adapted from ggml, accumulation is made using f32. 
// https://github.com/ggerganov/llama.cpp/blob/d59bd97065cd7ded6c4ecab54b1d5e0b1b11e318/ggml-cuda.cu#L477 template <typename T> __device__ void layernorm(const T * x, T * dst, const T * alpha, const T * beta, const int ncols, const int block_size, const float eps) { const int row = blockIdx.x*blockDim.y + threadIdx.y; const int tid = threadIdx.x; float2 mean_var = make_float2(0.f, 0.f); for (int col = tid; col < ncols; col += block_size) { const float xi = x[row*ncols + col]; mean_var.x += xi; mean_var.y += xi * xi; } // sum up partial sums mean_var = warp_reduce_sum(mean_var); if (block_size > WARP_SIZE) { __shared__ float2 s_sum[32]; int warp_id = threadIdx.x / WARP_SIZE; int lane_id = threadIdx.x % WARP_SIZE; if (lane_id == 0) { s_sum[warp_id] = mean_var; } __syncthreads(); mean_var = s_sum[lane_id]; mean_var = warp_reduce_sum(mean_var); } const float mean = mean_var.x / ncols; const float var = mean_var.y / ncols - mean * mean; const float inv_std = rsqrtf(var + eps); if (alpha == nullptr && beta == nullptr) { for (int col = tid; col < ncols; col += block_size) { float lhs = (static_cast<float>(x[row*ncols + col]) - mean) * inv_std; dst[row*ncols + col] = static_cast<T>(lhs); } } else if (alpha == nullptr && beta != nullptr) { for (int col = tid; col < ncols; col += block_size) { float b = static_cast<float>(beta[col]); float lhs = (static_cast<float>(x[row*ncols + col]) - mean) * inv_std; dst[row*ncols + col] = static_cast<T>(lhs + b); } } else if (alpha != nullptr && beta == nullptr) { for (int col = tid; col < ncols; col += block_size) { float a = static_cast<float>(alpha[col]); float lhs = (static_cast<float>(x[row*ncols + col]) - mean) * inv_std; dst[row*ncols + col] = static_cast<T>(lhs * a); } } else { for (int col = tid; col < ncols; col += block_size) { float a = static_cast<float>(alpha[col]); float b = static_cast<float>(beta[col]); float lhs = (static_cast<float>(x[row*ncols + col]) - mean) * inv_std; dst[row*ncols + col] = static_cast<T>(lhs * a + b); } } } // RmsNorm implementation adapted from ggml, accumulation is made using f32. // https://github.com/ggerganov/llama.cpp/blob/d59bd97065cd7ded6c4ecab54b1d5e0b1b11e318/ggml-cuda.cu#L523 template <typename T> __device__ void rmsnorm(const T * x, T * dst, const T * alpha, const int ncols, const int block_size, const float eps) { const int row = blockIdx.x*blockDim.y + threadIdx.y; const int tid = threadIdx.x; float tmp = 0.0f; // partial sum for thread in warp for (int col = tid; col < ncols; col += block_size) { const float xi = static_cast<float>(x[row*ncols + col]); tmp += xi * xi; } // sum up partial sums tmp = warp_reduce_sum(tmp); if (block_size > WARP_SIZE) { __shared__ float s_sum[32]; int warp_id = threadIdx.x / WARP_SIZE; int lane_id = threadIdx.x % WARP_SIZE; if (lane_id == 0) { s_sum[warp_id] = tmp; } __syncthreads(); tmp = s_sum[lane_id]; tmp = warp_reduce_sum(tmp); } const float mean = tmp / ncols; const float scale = rsqrtf(mean + eps); if (alpha == nullptr) { for (int col = tid; col < ncols; col += block_size) { dst[row*ncols + col] = static_cast<T>(scale * static_cast<float>(x[row*ncols + col])); } } else { for (int col = tid; col < ncols; col += block_size) { float a = static_cast<float>(alpha[col]); dst[row*ncols + col] = static_cast<T>(scale * static_cast<float>(x[row*ncols + col]) * a); } } } // Softmax implementation adapted from ggml. 
// https://github.com/ggerganov/llama.cpp/blob/d59bd97065cd7ded6c4ecab54b1d5e0b1b11e318/ggml-cuda.cu#L4159 template <typename T, typename ACC> __device__ void softmax(const T * x, T * dst, const int ncols) { const int row = blockDim.x*blockIdx.x + threadIdx.x; const int block_size = blockDim.y; const int tid = threadIdx.y; T max_val = -INFINITY; for (int col = tid; col < ncols; col += block_size) { const int i = row*ncols + col; max_val = maxg(max_val, x[i]); } // find the max value in the block #pragma unroll for (int mask = 16; mask > 0; mask >>= 1) { max_val = maxg(max_val, __shfl_xor_sync(0xffffffff, max_val, mask, 32)); } ACC tmp = 0.; for (int col = tid; col < ncols; col += block_size) { const int i = row*ncols + col; const T val = expg(x[i] - max_val); tmp += static_cast<ACC>(val); dst[i] = val; } // sum up partial sums #pragma unroll for (int mask = 16; mask > 0; mask >>= 1) { tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32); } const ACC inv_tmp = 1. / tmp; for (int col = tid; col < ncols; col += block_size) { const int i = row*ncols + col; dst[i] *= inv_tmp; } } template <typename T> __device__ void ropei(const T * src, const T * cos, const T * sin, T * dst, const uint32_t bh, const uint32_t td) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; if (2 * idx >= bh * td) return; uint32_t rope_idx = idx % (td / 2); T c = cos[rope_idx]; T s = sin[rope_idx]; dst[2 * idx] = src[2 * idx] * c - src[2 * idx + 1] * s; dst[2 * idx + 1] = src[2 * idx] * s + src[2 * idx + 1] * c; } template <typename T> __device__ void rope(const T * src, const T * cos, const T * sin, T * dst, const uint32_t bh, const uint32_t td, const uint32_t d) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; if (2 * idx >= bh * td) return; uint32_t i_bh = idx / (td / 2); uint32_t i_td = idx - (td / 2) * i_bh; uint32_t i_t = i_td / (d / 2); uint32_t i_d = i_td - (d / 2) * i_t; uint32_t i1 = i_bh * td + i_t * d + i_d; uint32_t i2 = i1 + d / 2; uint32_t i_cs = i_t * (d / 2) + i_d; T c = cos[i_cs]; T s = sin[i_cs]; dst[i1] = src[i1] * c - src[i2] * s; dst[i2] = src[i1] * s + src[i2] * c; } template <typename T> __device__ void rope_thd( const T * src, const T * cos, const T * sin, T * dst, const uint32_t b, const uint32_t t, const uint32_t h, const uint32_t d ) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; if (2 * idx >= b * t * h * d) return; uint32_t i_bth = idx / (d / 2); uint32_t i_d = idx - (d / 2) * i_bth; uint32_t i_t = (i_bth / h) % t; uint32_t i1 = i_bth * d + i_d; uint32_t i2 = i1 + d / 2; uint32_t i_cs = i_t * (d / 2) + i_d; T c = cos[i_cs]; T s = sin[i_cs]; dst[i1] = src[i1] * c - src[i2] * s; dst[i2] = src[i1] * s + src[i2] * c; } template <typename T> __device__ void fast_max(const size_t src_numel, const size_t el_to_sum_per_block, const size_t num_dims, const size_t *info, const T *src, T *dst) { const size_t *dims = info; const size_t *strides = info + num_dims; __shared__ T shr[BLOCK_SIZE]; size_t tid = threadIdx.x; size_t dst_id = blockIdx.x; shr[tid] = -INFINITY; // Elements summed in this block range from dst_id * el_to_sum_per_block // to (dst_id + 1) * el_to_sum_per_block. size_t start_idx = dst_id * el_to_sum_per_block; size_t stop_idx = min(start_idx + el_to_sum_per_block, src_numel); size_t idx = start_idx + tid; while (idx < stop_idx) { // TODO: Fast version for the contiguous case. 
size_t strided_i = get_strided_index(idx, num_dims, dims, strides); shr[tid] = maxg(shr[tid], src[strided_i]); idx += blockDim.x; } // Parallel reduction, see the slides: // https://www.olcf.ornl.gov/wp-content/uploads/2019/12/05_Atomics_Reductions_Warp_Shuffle.pdf // https://stackoverflow.com/questions/66078814/is-cuda-atomicadd-operation-faster-than-launch-another-kernel-when-we-do-reduce for (int s = blockDim.x / 2; s > 0; s >>= 1) { __syncthreads(); if (tid < s) shr[tid] = maxg(shr[tid], shr[tid + s]); } if (tid == 0) dst[dst_id] = shr[0]; } template <typename T> __device__ void fast_min(const size_t src_numel, const size_t el_to_sum_per_block, const size_t num_dims, const size_t *info, const T *src, T *dst) { const size_t *dims = info; const size_t *strides = info + num_dims; __shared__ T shr[BLOCK_SIZE]; size_t tid = threadIdx.x; size_t dst_id = blockIdx.x; shr[tid] = INFINITY; // Elements summed in this block range from dst_id * el_to_sum_per_block // to (dst_id + 1) * el_to_sum_per_block. size_t start_idx = dst_id * el_to_sum_per_block; size_t stop_idx = min(start_idx + el_to_sum_per_block, src_numel); size_t idx = start_idx + tid; while (idx < stop_idx) { // TODO: Fast version for the contiguous case. size_t strided_i = get_strided_index(idx, num_dims, dims, strides); shr[tid] = ming(shr[tid], src[strided_i]); idx += blockDim.x; } // Parallel reduction, see the slides: // https://www.olcf.ornl.gov/wp-content/uploads/2019/12/05_Atomics_Reductions_Warp_Shuffle.pdf // https://stackoverflow.com/questions/66078814/is-cuda-atomicadd-operation-faster-than-launch-another-kernel-when-we-do-reduce for (int s = blockDim.x / 2; s > 0; s >>= 1) { __syncthreads(); if (tid < s) shr[tid] = ming(shr[tid], shr[tid + s]); } if (tid == 0) dst[dst_id] = shr[0]; } template <typename T> __device__ void fast_argmin(const size_t src_numel, const size_t el_to_sum_per_block, const size_t num_dims, const size_t *info, const T *src, uint32_t *dst) { const size_t *dims = info; const size_t *strides = info + num_dims; __shared__ T shr[BLOCK_SIZE]; __shared__ uint32_t shr_index[BLOCK_SIZE]; size_t tid = threadIdx.x; size_t dst_id = blockIdx.x; // Not sure how that works on uint32_t and uint8_t but it seems to do ok. shr[tid] = INFINITY; shr_index[tid] = 0xFFFFFFFF; bool not_set = true; // Elements summed in this block range from dst_id * el_to_sum_per_block // to (dst_id + 1) * el_to_sum_per_block. size_t start_idx = dst_id * el_to_sum_per_block; size_t stop_idx = min(start_idx + el_to_sum_per_block, src_numel); size_t idx = start_idx + tid; while (idx < stop_idx) { // TODO: Fast version for the contiguous case. size_t strided_i = get_strided_index(idx, num_dims, dims, strides); if (not_set || src[strided_i] < shr[tid]) { shr[tid] = src[strided_i]; // Assume that the reduction takes place over the last dimension which is contiguous. 
shr_index[tid] = idx % dims[num_dims - 1]; not_set = false; } idx += blockDim.x; } // Parallel reduction, see the slides: // https://www.olcf.ornl.gov/wp-content/uploads/2019/12/05_Atomics_Reductions_Warp_Shuffle.pdf // https://stackoverflow.com/questions/66078814/is-cuda-atomicadd-operation-faster-than-launch-another-kernel-when-we-do-reduce for (int s = blockDim.x / 2; s > 0; s >>= 1) { __syncthreads(); if (tid < s && shr[tid + s] < shr[tid]) { shr[tid] = shr[tid + s]; shr_index[tid] = shr_index[tid + s]; } } if (tid == 0) dst[dst_id] = shr_index[0]; } template <typename T> __device__ void fast_argmax(const size_t src_numel, const size_t el_to_sum_per_block, const size_t num_dims, const size_t *info, const T *src, uint32_t *dst) { const size_t *dims = info; const size_t *strides = info + num_dims; __shared__ T shr[BLOCK_SIZE]; __shared__ uint32_t shr_index[BLOCK_SIZE]; size_t tid = threadIdx.x; size_t dst_id = blockIdx.x; shr[tid] = -INFINITY; shr_index[tid] = 0xFFFFFFFF; bool not_set = true; // Elements summed in this block range from dst_id * el_to_sum_per_block // to (dst_id + 1) * el_to_sum_per_block. size_t start_idx = dst_id * el_to_sum_per_block; size_t stop_idx = min(start_idx + el_to_sum_per_block, src_numel); size_t idx = start_idx + tid; while (idx < stop_idx) { // TODO: Fast version for the contiguous case. size_t strided_i = get_strided_index(idx, num_dims, dims, strides); if (not_set || src[strided_i] > shr[tid]) { shr[tid] = src[strided_i]; // Assume that the reduction takes place over the last dimension which is contiguous. shr_index[tid] = idx % dims[num_dims - 1]; not_set = false; } idx += blockDim.x; } // Parallel reduction, see the slides: // https://www.olcf.ornl.gov/wp-content/uploads/2019/12/05_Atomics_Reductions_Warp_Shuffle.pdf // https://stackoverflow.com/questions/66078814/is-cuda-atomicadd-operation-faster-than-launch-another-kernel-when-we-do-reduce for (int s = blockDim.x / 2; s > 0; s >>= 1) { __syncthreads(); if (tid < s && shr[tid + s] > shr[tid]) { shr[tid] = shr[tid + s]; shr_index[tid] = shr_index[tid + s]; } } if (tid == 0) dst[dst_id] = shr_index[0]; } #define FAST_OP(TYPENAME, MIN_NAME, MAX_NAME, ARGMIN_NAME, ARGMAX_NAME, SUM_NAME) \ extern "C" __global__ void ARGMIN_NAME( \ const size_t src_numel, const size_t el_to_sum_per_block, \ const size_t num_dims, const size_t *info, const TYPENAME *src, \ uint32_t *dst) { \ fast_argmin(src_numel, el_to_sum_per_block, num_dims, info, src, dst); \ } \ extern "C" __global__ void ARGMAX_NAME( \ const size_t src_numel, const size_t el_to_sum_per_block, \ const size_t num_dims, const size_t *info, const TYPENAME *src, \ uint32_t *dst) { \ fast_argmax(src_numel, el_to_sum_per_block, num_dims, info, src, dst); \ } \ extern "C" __global__ void MIN_NAME( \ const size_t src_numel, const size_t el_to_sum_per_block, \ const size_t num_dims, const size_t *info, const TYPENAME *src, \ TYPENAME *dst) { \ fast_min(src_numel, el_to_sum_per_block, num_dims, info, src, dst); \ } \ extern "C" __global__ void MAX_NAME( \ const size_t src_numel, const size_t el_to_sum_per_block, \ const size_t num_dims, const size_t *info, const TYPENAME *src, \ TYPENAME *dst) { \ fast_max(src_numel, el_to_sum_per_block, num_dims, info, src, dst); \ } \ extern "C" __global__ void SUM_NAME( \ const size_t src_numel, const size_t el_to_sum_per_block, \ const size_t num_dims, const size_t *info, const TYPENAME *src, \ TYPENAME *dst) { \ fast_sum(src_numel, el_to_sum_per_block, num_dims, info, src, dst); \ } #define SUM_OP(TYPENAME, FN_NAME) \ 
extern "C" __global__ void FN_NAME( \ const size_t numel, const size_t num_dims, const size_t num_sum_dims, \ const size_t *info, const TYPENAME *inp, TYPENAME *out) { \ const size_t *dims = info; \ const size_t *strides = info + num_dims; \ const size_t *sum_dims_l = info + 2 * num_dims; \ const size_t *sum_dims_s = info + 2 * num_dims + num_sum_dims; \ if (is_contiguous(num_dims, dims, strides)) { \ for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; \ i += blockDim.x * gridDim.x) { \ size_t dst_index = i; \ for (unsigned int nd = 0; nd < num_sum_dims; ++nd) { \ size_t stride = sum_dims_s[nd]; \ size_t pre = dst_index / stride; \ size_t post = dst_index % stride; \ dst_index = (pre / sum_dims_l[nd]) * stride + post; \ } \ atomicAdd(out + dst_index, inp[i]); \ } \ } else { \ for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; \ i += blockDim.x * gridDim.x) { \ unsigned strided_i = get_strided_index(i, num_dims, dims, strides); \ size_t dst_index = i; \ for (unsigned int nd = 0; nd < num_sum_dims; ++nd) { \ size_t stride = sum_dims_s[nd]; \ size_t pre = dst_index / stride; \ size_t post = dst_index % stride; \ dst_index = (pre / sum_dims_l[nd]) * stride + post; \ } \ atomicAdd(out + dst_index, inp[strided_i]); \ } \ } \ } #define SOFTMAX_OP(TYPENAME, ACC_TYPENAME, FN_NAME) \ extern "C" __global__ void FN_NAME( \ const TYPENAME *src, TYPENAME *dst, \ const int n_cols) { \ softmax<TYPENAME, ACC_TYPENAME>(src, dst, n_cols); \ } \ #define RMSNORM_OP(TYPENAME, FN_NAME) \ extern "C" __global__ void FN_NAME( \ const TYPENAME *src, TYPENAME *dst, const TYPENAME *alpha, \ const int n_cols, const int block_size, const float eps) { \ rmsnorm<TYPENAME>(src, dst, alpha, n_cols, block_size, eps); \ } \ #define LAYERNORM_OP(TYPENAME, FN_NAME) \ extern "C" __global__ void FN_NAME( \ const TYPENAME *src, TYPENAME *dst, const TYPENAME *alpha, \ const TYPENAME *beta, const int n_cols, const int block_size, const float eps) { \ layernorm<TYPENAME>(src, dst, alpha, beta, n_cols, block_size, eps); \ } \ #define ROPE_OP(TYPENAME, FN_NAME, FN_NAME_I, FN_NAME_THD) \ extern "C" __global__ void FN_NAME_I( \ const TYPENAME *src, \ const TYPENAME *cos, \ const TYPENAME *sin, \ TYPENAME *dst, \ const uint32_t bh, \ const uint32_t td) { \ ropei<TYPENAME>(src, cos, sin, dst, bh, td); \ } \ extern "C" __global__ void FN_NAME( \ const TYPENAME *src, \ const TYPENAME *cos, \ const TYPENAME *sin, \ TYPENAME *dst, \ const uint32_t bh, \ const uint32_t td, \ const uint32_t d) { \ rope<TYPENAME>(src, cos, sin, dst, bh, td, d); \ } \ extern "C" __global__ void FN_NAME_THD( \ const TYPENAME *src, \ const TYPENAME *cos, \ const TYPENAME *sin, \ TYPENAME *dst, \ const uint32_t b, \ const uint32_t t, \ const uint32_t h, \ const uint32_t d) { \ rope_thd<TYPENAME>(src, cos, sin, dst, b, t, h, d); \ } \ #if __CUDA_ARCH__ >= 800 SOFTMAX_OP(__nv_bfloat16, float, softmax_bf16) RMSNORM_OP(__nv_bfloat16, rmsnorm_bf16) LAYERNORM_OP(__nv_bfloat16, layernorm_bf16) ROPE_OP(__nv_bfloat16, rope_bf16, rope_i_bf16, rope_thd_bf16) SUM_OP(__nv_bfloat16, sum_bf16) FAST_OP(__nv_bfloat16, fast_min_bf16, fast_max_bf16, fast_argmin_bf16, fast_argmax_bf16, fast_sum_bf16) #endif #if __CUDA_ARCH__ >= 530 SOFTMAX_OP(__half, float, softmax_f16) RMSNORM_OP(__half, rmsnorm_f16) LAYERNORM_OP(__half, layernorm_f16) ROPE_OP(__half, rope_f16, rope_i_f16, rope_thd_f16) SUM_OP(__half, sum_f16) FAST_OP(__half, fast_min_f16, fast_max_f16, fast_argmin_f16, fast_argmax_f16, fast_sum_f16) #endif SUM_OP(float, sum_f32) SUM_OP(double, 
sum_f64) SUM_OP(uint32_t, sum_u32) SOFTMAX_OP(float, float, softmax_f32) SOFTMAX_OP(double, double, softmax_f64) RMSNORM_OP(float, rmsnorm_f32) RMSNORM_OP(double, rmsnorm_f64) LAYERNORM_OP(float, layernorm_f32) LAYERNORM_OP(double, layernorm_f64) ROPE_OP(float, rope_f32, rope_i_f32, rope_thd_f32) ROPE_OP(double, rope_f64, rope_i_f64, rope_thd_f64) FAST_OP(float, fast_min_f32, fast_max_f32, fast_argmin_f32, fast_argmax_f32, fast_sum_f32) FAST_OP(double, fast_min_f64, fast_max_f64, fast_argmin_f64, fast_argmax_f64, fast_sum_f64) FAST_OP(uint32_t, fast_min_u32, fast_max_u32, fast_argmin_u32, fast_argmax_u32, fast_sum_u32) FAST_OP(int64_t, fast_min_i64, fast_max_i64, fast_argmin_i64, fast_argmax_i64, fast_sum_i64) FAST_OP(uint8_t, fast_min_u8, fast_max_u8, fast_argmin_u8, fast_argmax_u8, fast_sum_u8)
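// ---------------------------------------------------------------------------
// Hedged usage sketch (not part of the original kernel source): candle drives
// these kernels from Rust, so the host-side launch below is only an
// illustration of the argument layout the FAST_OP reductions expect. It sums
// a contiguous (rows, cols) f32 matrix over its last dimension, with one block
// per output element and `info` packing the dims followed by the strides.
extern "C" void example_fast_sum_rows_f32(const float *src_dev, float *dst_dev,
                                          size_t rows, size_t cols) {
  // dims = {rows, cols}, strides = {cols, 1} for a contiguous matrix.
  size_t info_host[4] = {rows, cols, cols, 1};
  size_t *info_dev = nullptr;
  cudaMalloc(&info_dev, sizeof(info_host));
  cudaMemcpy(info_dev, info_host, sizeof(info_host), cudaMemcpyHostToDevice);
  // One block per row; each block reduces `cols` elements into dst_dev[row].
  fast_sum_f32<<<(unsigned)rows, BLOCK_SIZE>>>(rows * cols, cols, 2, info_dev,
                                               src_dev, dst_dev);
  cudaDeviceSynchronize();
  cudaFree(info_dev);
}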
candle/candle-kernels/src/reduce.cu/0
{ "file_path": "candle/candle-kernels/src/reduce.cu", "repo_id": "candle", "token_count": 12783 }
// The implementation below comes from MLX. // https://github.com/ml-explore/mlx/blob/0cea88bcc5e98e81a24d92eed8870a6976999f05/mlx/backend/metal/kernels/sort.h // Copyright © 2023-2024 Apple Inc. #define MLX_MTL_CONST static constant constexpr const #define MLX_MTL_LOOP_UNROLL _Pragma("clang loop unroll(full)") #include <metal_stdlib> using namespace metal; typedef bfloat bfloat16_t; // From utils.h /////////////////////////////////////////////////////////////////////////////// // Type limits utils /////////////////////////////////////////////////////////////////////////////// template <typename U> struct Limits { static const constant U max = metal::numeric_limits<U>::max(); static const constant U min = metal::numeric_limits<U>::min(); static const constant U finite_max = metal::numeric_limits<U>::max(); static const constant U finite_min = metal::numeric_limits<U>::min(); }; #define instantiate_default_limit(type) \ template <> \ struct Limits<type> { \ static constexpr constant type max = metal::numeric_limits<type>::max(); \ static constexpr constant type min = metal::numeric_limits<type>::min(); \ static constexpr constant type finite_max = \ metal::numeric_limits<type>::max(); \ static constexpr constant type finite_min = \ metal::numeric_limits<type>::min(); \ }; instantiate_default_limit(uint8_t); instantiate_default_limit(uint16_t); instantiate_default_limit(uint32_t); instantiate_default_limit(uint64_t); instantiate_default_limit(int8_t); instantiate_default_limit(int16_t); instantiate_default_limit(int32_t); instantiate_default_limit(int64_t); #define instantiate_float_limit(type) \ template <> \ struct Limits<type> { \ static constexpr constant type max = \ metal::numeric_limits<type>::infinity(); \ static constexpr constant type min = \ -metal::numeric_limits<type>::infinity(); \ static constexpr constant type finite_max = \ metal::numeric_limits<type>::max(); \ static constexpr constant type finite_min = \ -metal::numeric_limits<type>::max(); \ }; instantiate_float_limit(half); instantiate_float_limit(float); instantiate_float_limit(bfloat16_t); template <> struct Limits<bool> { static constexpr constant bool max = true; static constexpr constant bool min = false; }; /////////////////////////////////////////////////////////////////////////////// // Single Array with generic dims template <typename IdxT = int64_t> METAL_FUNC IdxT elem_to_loc( IdxT elem, constant const int* shape, constant const int64_t* strides, int ndim) { IdxT loc = 0; for (int i = ndim - 1; i >= 0 && elem > 0; --i) { loc += (elem % shape[i]) * IdxT(strides[i]); elem /= shape[i]; } return loc; } // Non templated version to handle arbitrary dims template <typename IdxT = int64_t> METAL_FUNC IdxT elem_to_loc( uint3 elem, constant const int* shape, constant const int64_t* strides, int ndim) { IdxT loc = elem.x * IdxT(strides[ndim - 1]) + elem.y * IdxT(strides[ndim - 2]); for (int d = ndim - 3; d >= 0; --d) { loc += (elem.z % shape[d]) * IdxT(strides[d]); elem.z /= shape[d]; } return loc; } // Instantiate a templated kernel. // Extra args are used as template parameters: // e.g. instantiate_kernel(binary_int, binary, a, b) -> // [[host_name(binary_int)]] [kernel] binary<a, b> #define instantiate_kernel(name, func, ...) 
\ template [[host_name( \ name)]] [[kernel]] decltype(func<__VA_ARGS__>) func<__VA_ARGS__>; // Based on GPU merge sort algorithm at // https://github.com/NVIDIA/cccl/tree/main/cub/cub /////////////////////////////////////////////////////////////////////////////// // Thread-level sort /////////////////////////////////////////////////////////////////////////////// template <typename T> METAL_FUNC void thread_swap(thread T& a, thread T& b) { T w = a; a = b; b = w; } template <typename T> struct LessThan { static constexpr constant T init = Limits<T>::max; METAL_FUNC bool operator()(T a, T b) { return a < b; } }; template < typename val_t, typename idx_t, bool ARG_SORT, short N_PER_THREAD, typename CompareOp> struct ThreadSort { static METAL_FUNC void sort( thread val_t (&vals)[N_PER_THREAD], thread idx_t (&idxs)[N_PER_THREAD]) { CompareOp op; MLX_MTL_LOOP_UNROLL for (short i = 0; i < N_PER_THREAD; ++i) { MLX_MTL_LOOP_UNROLL for (short j = i & 1; j < N_PER_THREAD - 1; j += 2) { if (op(vals[j + 1], vals[j])) { thread_swap(vals[j + 1], vals[j]); thread_swap(idxs[j + 1], idxs[j]); } } } } }; /////////////////////////////////////////////////////////////////////////////// // Threadgroup-level sort /////////////////////////////////////////////////////////////////////////////// template < typename val_t, typename idx_t, bool ARG_SORT, short BLOCK_THREADS, short N_PER_THREAD, typename CompareOp> struct BlockMergeSort { using thread_sort_t = ThreadSort<val_t, idx_t, ARG_SORT, N_PER_THREAD, CompareOp>; static METAL_FUNC int merge_partition( const threadgroup val_t* As, const threadgroup val_t* Bs, short A_sz, short B_sz, short sort_md) { CompareOp op; short A_st = max(0, sort_md - B_sz); short A_ed = min(sort_md, A_sz); while (A_st < A_ed) { short md = A_st + (A_ed - A_st) / 2; auto a = As[md]; auto b = Bs[sort_md - 1 - md]; if (op(b, a)) { A_ed = md; } else { A_st = md + 1; } } return A_ed; } static METAL_FUNC void merge_step( const threadgroup val_t* As, const threadgroup val_t* Bs, const threadgroup idx_t* As_idx, const threadgroup idx_t* Bs_idx, short A_sz, short B_sz, thread val_t (&vals)[N_PER_THREAD], thread idx_t (&idxs)[N_PER_THREAD]) { CompareOp op; short a_idx = 0; short b_idx = 0; for (int i = 0; i < N_PER_THREAD; ++i) { auto a = As[a_idx]; auto b = Bs[b_idx]; bool pred = (b_idx < B_sz) && (a_idx >= A_sz || op(b, a)); vals[i] = pred ? b : a; idxs[i] = pred ? 
Bs_idx[b_idx] : As_idx[a_idx]; b_idx += short(pred); a_idx += short(!pred); } } static METAL_FUNC void sort( threadgroup val_t* tgp_vals [[threadgroup(0)]], threadgroup idx_t* tgp_idxs [[threadgroup(1)]], int size_sorted_axis, uint3 lid [[thread_position_in_threadgroup]]) { // Get thread location int idx = lid.x * N_PER_THREAD; // Load from shared memory thread val_t thread_vals[N_PER_THREAD]; thread idx_t thread_idxs[N_PER_THREAD]; for (int i = 0; i < N_PER_THREAD; ++i) { thread_vals[i] = tgp_vals[idx + i]; if (ARG_SORT) { thread_idxs[i] = tgp_idxs[idx + i]; } } // Per thread sort if (idx < size_sorted_axis) { thread_sort_t::sort(thread_vals, thread_idxs); } // Do merges using threadgroup memory for (int merge_threads = 2; merge_threads <= BLOCK_THREADS; merge_threads *= 2) { // Update threadgroup memory threadgroup_barrier(mem_flags::mem_threadgroup); for (int i = 0; i < N_PER_THREAD; ++i) { tgp_vals[idx + i] = thread_vals[i]; if (ARG_SORT) { tgp_idxs[idx + i] = thread_idxs[i]; } } threadgroup_barrier(mem_flags::mem_threadgroup); // Find location in merge step int merge_group = lid.x / merge_threads; int merge_lane = lid.x % merge_threads; int sort_sz = N_PER_THREAD * merge_threads; int sort_st = N_PER_THREAD * merge_threads * merge_group; // As = tgp_vals[A_st:A_ed] is sorted // Bs = tgp_vals[B_st:B_ed] is sorted int A_st = sort_st; int A_ed = sort_st + sort_sz / 2; int B_st = sort_st + sort_sz / 2; int B_ed = sort_st + sort_sz; const threadgroup val_t* As = tgp_vals + A_st; const threadgroup val_t* Bs = tgp_vals + B_st; int A_sz = A_ed - A_st; int B_sz = B_ed - B_st; // Find a partition of merge elements // Ci = merge(As[partition:], Bs[sort_md - partition:]) // of size N_PER_THREAD for each merge lane i // C = [Ci] is sorted int sort_md = N_PER_THREAD * merge_lane; int partition = merge_partition(As, Bs, A_sz, B_sz, sort_md); As += partition; Bs += sort_md - partition; A_sz -= partition; B_sz -= sort_md - partition; const threadgroup idx_t* As_idx = ARG_SORT ? tgp_idxs + A_st + partition : nullptr; const threadgroup idx_t* Bs_idx = ARG_SORT ? 
tgp_idxs + B_st + sort_md - partition : nullptr; // Merge starting at the partition and store results in thread registers merge_step(As, Bs, As_idx, Bs_idx, A_sz, B_sz, thread_vals, thread_idxs); } // Write out to shared memory threadgroup_barrier(mem_flags::mem_threadgroup); for (int i = 0; i < N_PER_THREAD; ++i) { tgp_vals[idx + i] = thread_vals[i]; if (ARG_SORT) { tgp_idxs[idx + i] = thread_idxs[i]; } } } }; /////////////////////////////////////////////////////////////////////////////// // Kernel sort /////////////////////////////////////////////////////////////////////////////// template < typename T, typename U, bool ARG_SORT, short BLOCK_THREADS, short N_PER_THREAD, typename CompareOp = LessThan<T>> struct KernelMergeSort { using val_t = T; using idx_t = uint; using block_merge_sort_t = BlockMergeSort< val_t, idx_t, ARG_SORT, BLOCK_THREADS, N_PER_THREAD, CompareOp>; MLX_MTL_CONST short N_PER_BLOCK = BLOCK_THREADS * N_PER_THREAD; static METAL_FUNC void block_sort( const device T* inp, device U* out, const constant int& size_sorted_axis, const constant int& in_stride_sorted_axis, const constant int& out_stride_sorted_axis, const constant int& in_stride_segment_axis, const constant int& out_stride_segment_axis, threadgroup val_t* tgp_vals, threadgroup idx_t* tgp_idxs, uint3 tid [[threadgroup_position_in_grid]], uint3 lid [[thread_position_in_threadgroup]]) { // tid.y tells us the segment index inp += tid.y * in_stride_segment_axis; out += tid.y * out_stride_segment_axis; // Copy into threadgroup memory for (short i = lid.x; i < N_PER_BLOCK; i += BLOCK_THREADS) { tgp_vals[i] = i < size_sorted_axis ? inp[i * in_stride_sorted_axis] : val_t(CompareOp::init); if (ARG_SORT) { tgp_idxs[i] = i; } } // Sort elements within the block threadgroup_barrier(mem_flags::mem_threadgroup); block_merge_sort_t::sort(tgp_vals, tgp_idxs, size_sorted_axis, lid); threadgroup_barrier(mem_flags::mem_threadgroup); // Write output for (int i = lid.x; i < size_sorted_axis; i += BLOCK_THREADS) { if (ARG_SORT) { out[i * out_stride_sorted_axis] = tgp_idxs[i]; } else { out[i * out_stride_sorted_axis] = tgp_vals[i]; } } } }; template < typename T, typename U, bool ARG_SORT, short BLOCK_THREADS, short N_PER_THREAD> [[kernel, max_total_threads_per_threadgroup(BLOCK_THREADS)]] void block_sort( const device T* inp [[buffer(0)]], device U* out [[buffer(1)]], const constant int& size_sorted_axis [[buffer(2)]], const constant int& in_stride_sorted_axis [[buffer(3)]], const constant int& out_stride_sorted_axis [[buffer(4)]], const constant int& in_stride_segment_axis [[buffer(5)]], const constant int& out_stride_segment_axis [[buffer(6)]], uint3 tid [[threadgroup_position_in_grid]], uint3 lid [[thread_position_in_threadgroup]]) { using sort_kernel = KernelMergeSort<T, U, ARG_SORT, BLOCK_THREADS, N_PER_THREAD>; using val_t = typename sort_kernel::val_t; using idx_t = typename sort_kernel::idx_t; if (ARG_SORT) { threadgroup val_t tgp_vals[sort_kernel::N_PER_BLOCK]; threadgroup idx_t tgp_idxs[sort_kernel::N_PER_BLOCK]; sort_kernel::block_sort( inp, out, size_sorted_axis, in_stride_sorted_axis, out_stride_sorted_axis, in_stride_segment_axis, out_stride_segment_axis, tgp_vals, tgp_idxs, tid, lid); } else { threadgroup val_t tgp_vals[sort_kernel::N_PER_BLOCK]; sort_kernel::block_sort( inp, out, size_sorted_axis, in_stride_sorted_axis, out_stride_sorted_axis, in_stride_segment_axis, out_stride_segment_axis, tgp_vals, nullptr, tid, lid); } } constant constexpr const int zero_helper = 0; template < typename T, typename U, bool ARG_SORT, 
short BLOCK_THREADS, short N_PER_THREAD> [[kernel, max_total_threads_per_threadgroup(BLOCK_THREADS)]] void block_sort_nc( const device T* inp [[buffer(0)]], device U* out [[buffer(1)]], const constant int& size_sorted_axis [[buffer(2)]], const constant int& in_stride_sorted_axis [[buffer(3)]], const constant int& out_stride_sorted_axis [[buffer(4)]], const constant int& nc_dim [[buffer(5)]], const constant int* nc_shape [[buffer(6)]], const constant int64_t* in_nc_strides [[buffer(7)]], const constant int64_t* out_nc_strides [[buffer(8)]], uint3 tid [[threadgroup_position_in_grid]], uint3 lid [[thread_position_in_threadgroup]]) { using sort_kernel = KernelMergeSort<T, U, ARG_SORT, BLOCK_THREADS, N_PER_THREAD>; using val_t = typename sort_kernel::val_t; using idx_t = typename sort_kernel::idx_t; auto in_block_idx = elem_to_loc(tid.y, nc_shape, in_nc_strides, nc_dim); auto out_block_idx = elem_to_loc(tid.y, nc_shape, out_nc_strides, nc_dim); inp += in_block_idx; out += out_block_idx; if (ARG_SORT) { threadgroup val_t tgp_vals[sort_kernel::N_PER_BLOCK]; threadgroup idx_t tgp_idxs[sort_kernel::N_PER_BLOCK]; sort_kernel::block_sort( inp, out, size_sorted_axis, in_stride_sorted_axis, out_stride_sorted_axis, zero_helper, zero_helper, tgp_vals, tgp_idxs, tid, lid); } else { threadgroup val_t tgp_vals[sort_kernel::N_PER_BLOCK]; sort_kernel::block_sort( inp, out, size_sorted_axis, in_stride_sorted_axis, out_stride_sorted_axis, zero_helper, zero_helper, tgp_vals, nullptr, tid, lid); } } template < typename val_t, typename idx_t, bool ARG_SORT, short BLOCK_THREADS, short N_PER_THREAD, typename CompareOp = LessThan<val_t>> struct KernelMultiBlockMergeSort { using block_merge_sort_t = BlockMergeSort< val_t, idx_t, ARG_SORT, BLOCK_THREADS, N_PER_THREAD, CompareOp>; MLX_MTL_CONST short N_PER_BLOCK = BLOCK_THREADS * N_PER_THREAD; static METAL_FUNC void block_sort( const device val_t* inp, device val_t* out_vals, device idx_t* out_idxs, const constant int& size_sorted_axis, const constant int& stride_sorted_axis, threadgroup val_t* tgp_vals, threadgroup idx_t* tgp_idxs, uint3 tid [[threadgroup_position_in_grid]], uint3 lid [[thread_position_in_threadgroup]]) { // tid.y tells us the segment index int base_idx = tid.x * N_PER_BLOCK; // Copy into threadgroup memory for (short i = lid.x; i < N_PER_BLOCK; i += BLOCK_THREADS) { int idx = base_idx + i; tgp_vals[i] = idx < size_sorted_axis ? 
inp[idx * stride_sorted_axis] : val_t(CompareOp::init); tgp_idxs[i] = idx; } // Sort elements within the block threadgroup_barrier(mem_flags::mem_threadgroup); block_merge_sort_t::sort(tgp_vals, tgp_idxs, size_sorted_axis, lid); threadgroup_barrier(mem_flags::mem_threadgroup); // Write output for (int i = lid.x; i < N_PER_BLOCK; i += BLOCK_THREADS) { int idx = base_idx + i; if (idx < size_sorted_axis) { out_vals[idx] = tgp_vals[i]; out_idxs[idx] = tgp_idxs[i]; } } } static METAL_FUNC int merge_partition( const device val_t* As, const device val_t* Bs, int A_sz, int B_sz, int sort_md) { CompareOp op; int A_st = max(0, sort_md - B_sz); int A_ed = min(sort_md, A_sz); while (A_st < A_ed) { int md = A_st + (A_ed - A_st) / 2; auto a = As[md]; auto b = Bs[sort_md - 1 - md]; if (op(b, a)) { A_ed = md; } else { A_st = md + 1; } } return A_ed; } }; template < typename val_t, typename idx_t, bool ARG_SORT, short BLOCK_THREADS, short N_PER_THREAD> [[kernel, max_total_threads_per_threadgroup(BLOCK_THREADS)]] void mb_block_sort( const device val_t* inp [[buffer(0)]], device val_t* out_vals [[buffer(1)]], device idx_t* out_idxs [[buffer(2)]], const constant int& size_sorted_axis [[buffer(3)]], const constant int& stride_sorted_axis [[buffer(4)]], const constant int& nc_dim [[buffer(5)]], const constant int* nc_shape [[buffer(6)]], const constant int64_t* nc_strides [[buffer(7)]], uint3 tid [[threadgroup_position_in_grid]], uint3 lid [[thread_position_in_threadgroup]]) { using sort_kernel = KernelMultiBlockMergeSort< val_t, idx_t, ARG_SORT, BLOCK_THREADS, N_PER_THREAD>; auto block_idx = elem_to_loc(tid.y, nc_shape, nc_strides, nc_dim); inp += block_idx; out_vals += tid.y * size_sorted_axis; out_idxs += tid.y * size_sorted_axis; threadgroup val_t tgp_vals[sort_kernel::N_PER_BLOCK]; threadgroup idx_t tgp_idxs[sort_kernel::N_PER_BLOCK]; sort_kernel::block_sort( inp, out_vals, out_idxs, size_sorted_axis, stride_sorted_axis, tgp_vals, tgp_idxs, tid, lid); } template < typename val_t, typename idx_t, bool ARG_SORT, short BLOCK_THREADS, short N_PER_THREAD> [[kernel]] void mb_block_partition( device idx_t* block_partitions [[buffer(0)]], const device val_t* dev_vals [[buffer(1)]], const device idx_t* dev_idxs [[buffer(2)]], const constant int& size_sorted_axis [[buffer(3)]], const constant int& merge_tiles [[buffer(4)]], const constant int& n_blocks [[buffer(5)]], uint3 tid [[threadgroup_position_in_grid]], uint3 lid [[thread_position_in_threadgroup]], uint3 tgp_dims [[threads_per_threadgroup]]) { using sort_kernel = KernelMultiBlockMergeSort< val_t, idx_t, ARG_SORT, BLOCK_THREADS, N_PER_THREAD>; block_partitions += tid.y * tgp_dims.x; dev_vals += tid.y * size_sorted_axis; dev_idxs += tid.y * size_sorted_axis; for (int i = lid.x; i <= n_blocks; i += tgp_dims.x) { // Find location in merge step int merge_group = i / merge_tiles; int merge_lane = i % merge_tiles; int sort_sz = sort_kernel::N_PER_BLOCK * merge_tiles; int sort_st = sort_kernel::N_PER_BLOCK * merge_tiles * merge_group; int A_st = min(size_sorted_axis, sort_st); int A_ed = min(size_sorted_axis, sort_st + sort_sz / 2); int B_st = A_ed; int B_ed = min(size_sorted_axis, B_st + sort_sz / 2); int partition_at = min(B_ed - A_st, sort_kernel::N_PER_BLOCK * merge_lane); int partition = sort_kernel::merge_partition( dev_vals + A_st, dev_vals + B_st, A_ed - A_st, B_ed - B_st, partition_at); block_partitions[i] = A_st + partition; } } template < typename val_t, typename idx_t, bool ARG_SORT, short BLOCK_THREADS, short N_PER_THREAD, typename CompareOp = 
LessThan<val_t>> [[kernel, max_total_threads_per_threadgroup(BLOCK_THREADS)]] void mb_block_merge( const device idx_t* block_partitions [[buffer(0)]], const device val_t* dev_vals_in [[buffer(1)]], const device idx_t* dev_idxs_in [[buffer(2)]], device val_t* dev_vals_out [[buffer(3)]], device idx_t* dev_idxs_out [[buffer(4)]], const constant int& size_sorted_axis [[buffer(5)]], const constant int& merge_tiles [[buffer(6)]], const constant int& num_tiles [[buffer(7)]], uint3 tid [[threadgroup_position_in_grid]], uint3 lid [[thread_position_in_threadgroup]]) { using sort_kernel = KernelMultiBlockMergeSort< val_t, idx_t, ARG_SORT, BLOCK_THREADS, N_PER_THREAD, CompareOp>; using block_sort_t = typename sort_kernel::block_merge_sort_t; block_partitions += tid.y * (num_tiles + 1); dev_vals_in += tid.y * size_sorted_axis; dev_idxs_in += tid.y * size_sorted_axis; dev_vals_out += tid.y * size_sorted_axis; dev_idxs_out += tid.y * size_sorted_axis; int block_idx = tid.x; int merge_group = block_idx / merge_tiles; int sort_st = sort_kernel::N_PER_BLOCK * merge_tiles * merge_group; int sort_sz = sort_kernel::N_PER_BLOCK * merge_tiles; int sort_md = sort_kernel::N_PER_BLOCK * block_idx - sort_st; int A_st = block_partitions[block_idx + 0]; int A_ed = block_partitions[block_idx + 1]; int B_st = min(size_sorted_axis, 2 * sort_st + sort_sz / 2 + sort_md - A_st); int B_ed = min( size_sorted_axis, 2 * sort_st + sort_sz / 2 + sort_md + sort_kernel::N_PER_BLOCK - A_ed); if ((block_idx % merge_tiles) == merge_tiles - 1) { A_ed = min(size_sorted_axis, sort_st + sort_sz / 2); B_ed = min(size_sorted_axis, sort_st + sort_sz); } int A_sz = A_ed - A_st; int B_sz = B_ed - B_st; // Load from global memory thread val_t thread_vals[N_PER_THREAD]; thread idx_t thread_idxs[N_PER_THREAD]; for (int i = 0; i < N_PER_THREAD; i++) { int idx = BLOCK_THREADS * i + lid.x; if (idx < (A_sz + B_sz)) { thread_vals[i] = (idx < A_sz) ? dev_vals_in[A_st + idx] : dev_vals_in[B_st + idx - A_sz]; thread_idxs[i] = (idx < A_sz) ? 
dev_idxs_in[A_st + idx] : dev_idxs_in[B_st + idx - A_sz]; } else { thread_vals[i] = CompareOp::init; thread_idxs[i] = 0; } } // Write to shared memory threadgroup val_t tgp_vals[sort_kernel::N_PER_BLOCK]; threadgroup idx_t tgp_idxs[sort_kernel::N_PER_BLOCK]; threadgroup_barrier(mem_flags::mem_threadgroup); for (int i = 0; i < N_PER_THREAD; i++) { int idx = BLOCK_THREADS * i + lid.x; tgp_vals[idx] = thread_vals[i]; tgp_idxs[idx] = thread_idxs[i]; } threadgroup_barrier(mem_flags::mem_threadgroup); // Merge int sort_md_local = min(A_sz + B_sz, N_PER_THREAD * int(lid.x)); int A_st_local = block_sort_t::merge_partition( tgp_vals, tgp_vals + A_sz, A_sz, B_sz, sort_md_local); int A_ed_local = A_sz; int B_st_local = sort_md_local - A_st_local; int B_ed_local = B_sz; int A_sz_local = A_ed_local - A_st_local; int B_sz_local = B_ed_local - B_st_local; // Do merge block_sort_t::merge_step( tgp_vals + A_st_local, tgp_vals + A_ed_local + B_st_local, tgp_idxs + A_st_local, tgp_idxs + A_ed_local + B_st_local, A_sz_local, B_sz_local, thread_vals, thread_idxs); threadgroup_barrier(mem_flags::mem_threadgroup); for (int i = 0; i < N_PER_THREAD; ++i) { int idx = lid.x * N_PER_THREAD; tgp_vals[idx + i] = thread_vals[i]; tgp_idxs[idx + i] = thread_idxs[i]; } threadgroup_barrier(mem_flags::mem_threadgroup); // Write output int base_idx = tid.x * sort_kernel::N_PER_BLOCK; for (int i = lid.x; i < sort_kernel::N_PER_BLOCK; i += BLOCK_THREADS) { int idx = base_idx + i; if (idx < size_sorted_axis) { dev_vals_out[idx] = tgp_vals[i]; dev_idxs_out[idx] = tgp_idxs[i]; } } } #define instantiate_block_sort( \ name, itname, itype, otname, otype, arg_sort, bn, tn) \ instantiate_kernel("c" #name "_" #itname "_" #otname "_bn" #bn "_tn" #tn, \ block_sort, itype, otype, arg_sort, bn, tn) \ instantiate_kernel("nc" #name "_" #itname "_" #otname "_bn" #bn "_tn" #tn, \ block_sort_nc, itype, otype, arg_sort, bn, tn) #define instantiate_arg_block_sort_base(itname, itype, bn, tn) \ instantiate_block_sort( \ arg_block_sort, itname, itype, uint32, uint32_t, true, bn, tn) #define instantiate_block_sort_base(itname, itype, bn, tn) \ instantiate_block_sort( \ _block_sort, itname, itype, itname, itype, false, bn, tn) #define instantiate_block_sort_tn(itname, itype, bn) \ instantiate_block_sort_base(itname, itype, bn, 8) \ instantiate_arg_block_sort_base(itname, itype, bn, 8) #define instantiate_block_sort_bn(itname, itype) \ instantiate_block_sort_tn(itname, itype, 128) \ instantiate_block_sort_tn(itname, itype, 256) \ instantiate_block_sort_tn(itname, itype, 512) instantiate_block_sort_bn(uint8, uint8_t) instantiate_block_sort_bn(uint32, uint32_t) instantiate_block_sort_bn(float16, half) instantiate_block_sort_bn(float32, float) instantiate_block_sort_bn(bfloat16, bfloat16_t) #define instantiate_block_sort_long(itname, itype) \ instantiate_block_sort_tn(itname, itype, 128) \ instantiate_block_sort_tn(itname, itype, 256) instantiate_block_sort_long(int64, int64_t) #define instantiate_multi_block_sort( \ vtname, vtype, itname, itype, arg_sort, bn, tn) \ instantiate_kernel("sort_mbsort_" #vtname "_" #itname "_bn" #bn "_tn" #tn, \ mb_block_sort, vtype, itype, arg_sort, bn, tn) \ instantiate_kernel("partition_mbsort_" #vtname "_" #itname "_bn" #bn "_tn" #tn, \ mb_block_partition, vtype, itype, arg_sort, bn, tn) \ instantiate_kernel("merge_mbsort_" #vtname "_" #itname "_bn" #bn "_tn" #tn, \ mb_block_merge, vtype, itype, arg_sort, bn, tn) #define instantiate_multi_block_sort_base(vtname, vtype) \ instantiate_multi_block_sort(vtname, vtype, uint32, 
uint32_t, true, 512, 8) instantiate_multi_block_sort_base(uint8, uint8_t) instantiate_multi_block_sort_base(uint32, uint32_t) instantiate_multi_block_sort_base(float16, half) instantiate_multi_block_sort_base(float32, float) instantiate_multi_block_sort_base(bfloat16, bfloat16_t) #define instantiate_multi_block_sort_long(vtname, vtype) \ instantiate_multi_block_sort(vtname, vtype, uint32, uint32_t, true, 256, 8) instantiate_multi_block_sort_long(int64, int64_t) // clang-format on
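
// Note on how these kernels fit together (the dispatch sequence itself is
// host-side and not shown in this file): rows that fit in a single threadgroup
// tile go through `block_sort` / `block_sort_nc` directly. Longer rows use the
// multi-block path:
//   1. `mb_block_sort` sorts each tile of N_PER_BLOCK elements independently.
//   2. `mb_block_partition` followed by `mb_block_merge` are run repeatedly,
//      with `merge_tiles` doubling on every pass (2, 4, 8, ...), so pairs of
//      already-sorted segments are merged along the merge-path split points
//      stored in `block_partitions`, ping-ponging between the val/idx buffers.
//   3. The process stops once one sorted segment covers the whole axis.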
candle/candle-metal-kernels/src/mlx_sort.metal/0
{ "file_path": "candle/candle-metal-kernels/src/mlx_sort.metal", "repo_id": "candle", "token_count": 12675 }
[package] name = "candle-nn" version.workspace = true edition.workspace = true description.workspace = true repository.workspace = true keywords.workspace = true categories.workspace = true license.workspace = true readme = "README.md" [dependencies] accelerate-src = { workspace = true, optional = true } candle = { workspace = true } half = { workspace = true } thiserror = { workspace = true } intel-mkl-src = { workspace = true, optional = true } num-traits = { workspace = true } rayon = { workspace = true } safetensors = { workspace = true } serde = { workspace = true } metal = { workspace = true, optional = true } candle-metal-kernels = { workspace = true, optional = true } [dev-dependencies] anyhow = { workspace = true } clap = { workspace = true } rand = { workspace = true } rand_distr = { workspace = true } criterion = { workspace = true } [features] default = [] accelerate = ["dep:accelerate-src", "candle/accelerate"] cuda = ["candle/cuda"] mkl = ["dep:intel-mkl-src", "candle/mkl"] metal = ["candle/metal", "dep:candle-metal-kernels", "dep:metal"] [[bench]] name = "bench_main" harness = false
candle/candle-nn/Cargo.toml/0
{ "file_path": "candle/candle-nn/Cargo.toml", "repo_id": "candle", "token_count": 371 }
//! Variable initialization. // This is based on: // https://github.com/pytorch/pytorch/blob/07107919297db3f8ab37f11c12666b6d6d5f692e/torch/nn/init.py# use candle::{DType, Device, Result, Shape, Tensor, Var}; /// Number of features as input or output of a layer. /// In Kaiming initialization, choosing `FanIn` preserves /// the magnitude of the variance of the weights in the /// forward pass, choosing `FanOut` preserves this /// magnitude in the backward pass. #[derive(Debug, Copy, Clone)] pub enum FanInOut { FanIn, FanOut, } impl FanInOut { /// Compute the fan-in or fan-out value for a weight tensor of /// the specified dimensions. /// <https://github.com/pytorch/pytorch/blob/dbeacf11820e336e803bb719b7aaaf2125ae4d9c/torch/nn/init.py#L284> pub fn for_shape(&self, shape: &Shape) -> usize { let dims = shape.dims(); let receptive_field_size: usize = dims.iter().skip(2).product(); match &self { FanInOut::FanIn => { if dims.len() < 2 { 1 } else { dims[1] * receptive_field_size } } FanInOut::FanOut => { if dims.is_empty() { 1 } else { dims[0] * receptive_field_size } } } } } #[derive(Debug, Copy, Clone)] pub enum NormalOrUniform { Normal, Uniform, } /// The non-linear function that follows this layer. ReLU is the /// recommended value. #[derive(Debug, Copy, Clone)] pub enum NonLinearity { ReLU, Linear, Sigmoid, Tanh, SELU, ExplicitGain(f64), } impl NonLinearity { // https://github.com/pytorch/pytorch/blob/07107919297db3f8ab37f11c12666b6d6d5f692e/torch/nn/init.py#L67 pub fn gain(&self) -> f64 { match *self { NonLinearity::ReLU => 2f64.sqrt(), NonLinearity::Tanh => 5. / 3., NonLinearity::Linear | NonLinearity::Sigmoid => 1., NonLinearity::SELU => 0.75, NonLinearity::ExplicitGain(g) => g, } } } /// Variable initializations. #[derive(Debug, Copy, Clone)] pub enum Init { /// Constant value. Const(f64), /// Random normal with some mean and standard deviation. Randn { mean: f64, stdev: f64 }, /// Uniform initialization between some lower and upper bounds. Uniform { lo: f64, up: f64 }, /// Kaiming uniform initialization. /// See "Delving deep into rectifiers: Surpassing human-level performance on ImageNet classification" /// He, K. et al. (2015). This uses a uniform distribution. Kaiming { dist: NormalOrUniform, fan: FanInOut, non_linearity: NonLinearity, }, } pub const ZERO: Init = Init::Const(0.); pub const ONE: Init = Init::Const(1.); pub const DEFAULT_KAIMING_UNIFORM: Init = Init::Kaiming { dist: NormalOrUniform::Uniform, fan: FanInOut::FanIn, non_linearity: NonLinearity::ReLU, }; pub const DEFAULT_KAIMING_NORMAL: Init = Init::Kaiming { dist: NormalOrUniform::Normal, fan: FanInOut::FanIn, non_linearity: NonLinearity::ReLU, }; impl Init { /// Creates a new tensor with the specified shape, device, and initialization. pub fn var<S: Into<Shape>>(&self, s: S, dtype: DType, device: &Device) -> Result<Var> { match self { Self::Const(v) if *v == 0. => Var::zeros(s, dtype, device), Self::Const(v) if *v == 1. => Var::ones(s, dtype, device), Self::Const(cst) => { Var::from_tensor(&Tensor::ones(s, dtype, device)?.affine(*cst, 0.)?) 
} Self::Uniform { lo, up } => Var::rand_f64(*lo, *up, s, dtype, device), Self::Randn { mean, stdev } => Var::randn_f64(*mean, *stdev, s, dtype, device), Self::Kaiming { dist, fan, non_linearity, } => { let s = s.into(); let fan = fan.for_shape(&s); let gain = non_linearity.gain(); let std = gain / (fan as f64).sqrt(); match dist { NormalOrUniform::Uniform => { let bound = 3f64.sqrt() * std; Var::rand_f64(-bound, bound, s, dtype, device) } NormalOrUniform::Normal => Var::randn_f64(0., std, s, dtype, device), } } } } } impl Default for Init { fn default() -> Self { Self::Const(0.) } }
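
// A minimal illustrative usage of the initializers above (shape, dtype and
// device are arbitrary placeholders): an (out_features, in_features) weight with
// fan_in = 128 and a ReLU gain of sqrt(2) is drawn uniformly from roughly
// [-0.217, 0.217].
#[cfg(test)]
mod init_example {
    use super::DEFAULT_KAIMING_UNIFORM;
    use candle::{DType, Device, Result};

    #[test]
    fn kaiming_uniform_example() -> Result<()> {
        let w = DEFAULT_KAIMING_UNIFORM.var((256, 128), DType::F32, &Device::Cpu)?;
        assert_eq!(w.dims(), &[256, 128]);
        Ok(())
    }
}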
candle/candle-nn/src/init.rs/0
{ "file_path": "candle/candle-nn/src/init.rs", "repo_id": "candle", "token_count": 2212 }
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use anyhow::Result; use candle::{test_utils, Device, Tensor}; use candle_nn::{LayerNorm, Module}; #[test] fn layer_norm() -> Result<()> { let device = &Device::Cpu; let w = Tensor::new(&[3f32], device)?; let b = Tensor::new(&[0.5f32], device)?; let ln2 = LayerNorm::new(Tensor::cat(&[&w, &w], 0)?, Tensor::cat(&[&b, &b], 0)?, 1e-8); let ln3 = LayerNorm::new( Tensor::cat(&[&w, &w, &w], 0)?, Tensor::cat(&[&b, &b, &b], 0)?, 1e-8, ); let ln = LayerNorm::new(w, b, 1e-8); let two = Tensor::new(&[[[2f32]]], device)?; let res = ln.forward(&two)?.flatten_all()?; assert_eq!(res.to_vec1::<f32>()?, [0.5f32]); let inp = Tensor::new(&[[[4f32, 0f32]]], device)?; let res = ln2.forward(&inp)?; assert_eq!(res.to_vec3::<f32>()?, [[[3.5f32, -2.5]]]); let inp = Tensor::new(&[[[1f32, 2., 3.], [4., 5., 6.], [9., 8., 7.]]], device)?; let res = ln3.forward(&inp)?; assert_eq!( test_utils::to_vec3_round(&res, 4)?, [[ [-3.1742, 0.5, 4.1742], [-3.1742, 0.5, 4.1742], [4.1742, 0.5, -3.1742] ]] ); let mean = (res.sum_keepdim(2)? / 3.0)?; // The average value should be `b`. assert_eq!( test_utils::to_vec3_round(&mean, 4)?, [[[0.5], [0.5], [0.5]]] ); let std = (res.broadcast_sub(&mean)?.sqr()?.sum_keepdim(2)?.sqrt()? / 3.0)?; // The standard deviation should be sqrt(`w`). assert_eq!( test_utils::to_vec3_round(&std, 4)?, [[[1.7321], [1.7321], [1.7321]]] ); Ok(()) }
candle/candle-nn/tests/layer_norm.rs/0
{ "file_path": "candle/candle-nn/tests/layer_norm.rs", "repo_id": "candle", "token_count": 892 }
## Installation

From the `candle-pyo3` directory, activate a virtual env in which you want the
candle package to be installed, then run:

```bash
maturin develop -r
python test.py
```

## Generating Stub Files for Type Hinting

For type hinting support, the `candle-pyo3` package requires `*.pyi` files. You can
automatically generate these files using the `stub.py` script.

### Steps:
1. Install the package using `maturin`.
2. Generate the stub files by running:
   ```bash
   python stub.py
   ```

### Validation:
To ensure that the stub files match the current implementation, execute:
```bash
python stub.py --check
```
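
## Quick Sanity Check

Once the package is installed in your virtual env, a short script along the lines
of `test_pytorch.py` (a minimal sketch; the tensor shape is arbitrary) should run
without errors:

```python
import candle

# Create a random tensor and print it to confirm the native extension loads.
t = candle.randn((2, 3))
print(t)
```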
candle/candle-pyo3/README.md/0
{ "file_path": "candle/candle-pyo3/README.md", "repo_id": "candle", "token_count": 190 }
import candle
from candle import Tensor
from .module import Module
from typing import Union, List, Tuple, Optional, Any

_shape_t = Union[int, List[int]]
import numbers


class LayerNorm(Module):
    r"""Applies Layer Normalization over a mini-batch of inputs as described in
    the paper `Layer Normalization <https://arxiv.org/abs/1607.06450>`

    .. math::
        y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
    """

    __constants__ = ["normalized_shape", "eps"]
    normalized_shape: Tuple[int, ...]
    eps: float

    def __init__(
        self,
        normalized_shape: _shape_t,
        eps: float = 1e-5,
        bias: bool = True,
        device=None,
        dtype=None,
    ) -> None:
        factory_kwargs = {"device": device, "dtype": dtype}
        super().__init__()
        if isinstance(normalized_shape, numbers.Integral):
            normalized_shape = (normalized_shape,)
        self.normalized_shape = tuple(normalized_shape)
        self.eps = eps
        self.weight = candle.ones(normalized_shape, **factory_kwargs)
        if bias:
            self.bias = candle.zeros(normalized_shape, **factory_kwargs)
        else:
            self.bias = None

    def forward(self, input: Tensor) -> Tensor:
        mean_x = input.sum_keepdim(2) / float(self.normalized_shape[-1])
        x = input.broadcast_sub(mean_x)
        norm_x = x.sqr().sum_keepdim(2) / float(self.normalized_shape[-1])
        x_normed = x.broadcast_div((norm_x + self.eps).sqrt())
        x = x_normed.broadcast_mul(self.weight)

        if self.bias is not None:
            x = x.broadcast_add(self.bias)
        return x

    def extra_repr(self) -> str:
        return "{normalized_shape}, eps={eps}".format(**self.__dict__)
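

# Minimal illustrative usage (shapes are arbitrary): the forward pass above
# reduces over dimension 2, so inputs are expected to be rank-3, e.g.
# (batch, seq, hidden) with hidden == normalized_shape[-1].
if __name__ == "__main__":
    ln = LayerNorm(64)
    xs = candle.randn((2, 5, 64))
    ys = ln.forward(xs)
    print(ys)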
candle/candle-pyo3/py_src/candle/nn/normalization.py/0
{ "file_path": "candle/candle-pyo3/py_src/candle/nn/normalization.py", "repo_id": "candle", "token_count": 803 }
import candle import torch # convert from candle tensor to torch tensor t = candle.randn((3, 512, 512)) torch_tensor = t.to_torch() print(torch_tensor) print(type(torch_tensor)) # convert from torch tensor to candle tensor t = torch.randn((3, 512, 512)) candle_tensor = candle.Tensor(t) print(candle_tensor) print(type(candle_tensor))
candle/candle-pyo3/test_pytorch.py/0
{ "file_path": "candle/candle-pyo3/test_pytorch.py", "repo_id": "candle", "token_count": 126 }
//! Based on the BLIP paper from Salesforce Research. //! //! The blip-image-captioning model can generate captions for an input image. //! //! - ⚡ [Interactive Wasm Example](https://huggingface.co/spaces/radames/Candle-BLIP-Image-Captioning) //! - 💻 [GH Link](https://github.com/salesforce/BLIP) //! - 🤗 [HF Link](https://huggingface.co/Salesforce/blip-image-captioning-base) //! - 📝 [Paper](https://arxiv.org/abs/2201.12086) //! use super::blip_text; use super::with_tracing::{conv2d, linear, Conv2d, Linear}; use candle::{Module, Result, Tensor, D}; use candle_nn::{layer_norm, Conv2dConfig, LayerNorm, VarBuilder}; use serde::Deserialize; #[derive(Debug, Clone, Deserialize)] pub struct VisionConfig { pub hidden_size: usize, pub intermediate_size: usize, pub projection_dim: usize, pub num_hidden_layers: usize, pub num_attention_heads: usize, pub image_size: usize, pub patch_size: usize, pub hidden_act: candle_nn::Activation, pub layer_norm_eps: f64, } #[derive(Debug, Clone, Deserialize)] pub struct Config { pub text_config: blip_text::Config, pub vision_config: VisionConfig, pub projection_dim: usize, pub image_text_hidden_size: usize, } impl Config { pub fn image_captioning_large() -> Self { let text_config = blip_text::Config { vocab_size: 30524, hidden_size: 768, encoder_hidden_size: 1024, intermediate_size: 3072, projection_dim: 768, num_hidden_layers: 12, num_attention_heads: 12, max_position_embeddings: 512, hidden_act: candle_nn::Activation::Gelu, layer_norm_eps: 1e-12, is_decoder: true, }; let vision_config = VisionConfig { hidden_size: 1024, intermediate_size: 4096, projection_dim: 512, num_hidden_layers: 24, num_attention_heads: 16, image_size: 384, patch_size: 16, hidden_act: candle_nn::Activation::Gelu, layer_norm_eps: 1e-5, }; Self { text_config, vision_config, projection_dim: 512, image_text_hidden_size: 256, } } } #[derive(Debug, Clone)] struct VisionEmbeddings { class_embedding: Tensor, patch_embedding: Conv2d, position_embedding: Tensor, } impl VisionEmbeddings { fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> { let class_embedding = vb.get((1, 1, cfg.hidden_size), "class_embedding")?; let conv_cfg = Conv2dConfig { stride: cfg.patch_size, ..Default::default() }; let patch_embedding = conv2d( 3, cfg.hidden_size, cfg.patch_size, conv_cfg, vb.pp("patch_embedding"), )?; let num_patches1 = cfg.image_size / cfg.patch_size; let num_patches = num_patches1 * num_patches1; let num_positions = num_patches + 1; let position_embedding = vb.get((1, num_positions, cfg.hidden_size), "position_embedding")?; Ok(Self { class_embedding, patch_embedding, position_embedding, }) } } impl Module for VisionEmbeddings { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let target_dtype = xs.dtype(); let b_size = xs.dim(0)?; let patch_embeds = xs.apply(&self.patch_embedding)?.flatten_from(2)?.t()?; let d = self.class_embedding.dim(D::Minus1)?; let class_embeds = self .class_embedding .broadcast_as((b_size, 1, d))? 
.to_dtype(target_dtype)?; let embeddings = Tensor::cat(&[&class_embeds, &patch_embeds], 1)?; let position_embedding = self.position_embedding.narrow(1, 0, embeddings.dim(1)?)?; embeddings.broadcast_add(&position_embedding) } } #[derive(Debug, Clone)] struct Attention { qkv: Linear, projection: Linear, scale: f64, num_heads: usize, } impl Attention { fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> { let embed_dim = cfg.hidden_size; let num_heads = cfg.num_attention_heads; let head_dim = embed_dim / num_heads; let scale = 1f64 / (head_dim as f64).sqrt(); let qkv = linear(embed_dim, 3 * embed_dim, vb.pp("qkv"))?; let projection = linear(embed_dim, embed_dim, vb.pp("projection"))?; Ok(Self { qkv, projection, scale, num_heads, }) } fn forward(&self, xs: &Tensor, attn_mask: Option<&Tensor>) -> Result<Tensor> { let (b_sz, tgt_len, embed_dim) = xs.dims3()?; let mixed_qkv = xs .apply(&self.qkv)? .reshape((b_sz, tgt_len, 3, self.num_heads, embed_dim / self.num_heads))? .permute((2, 0, 3, 1, 4))?; let query = mixed_qkv.get(0)?; let key = mixed_qkv.get(1)?; let value = mixed_qkv.get(2)?; let attention_scores = query.matmul(&key.t()?)?; let attention_scores = (attention_scores * self.scale)?; let attention_probs = candle_nn::ops::softmax_last_dim(&attention_scores)?; let attention_probs = match attn_mask { None => attention_probs, Some(attn_mask) => (attention_probs * attn_mask)?, }; attention_probs .matmul(&value)? .permute((0, 2, 1, 3))? .flatten_from(D::Minus2)? .apply(&self.projection) } } #[derive(Debug, Clone)] #[allow(clippy::upper_case_acronyms)] struct MLP { activation_fn: candle_nn::Activation, fc1: Linear, fc2: Linear, } impl MLP { fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> { let fc1 = linear(cfg.hidden_size, cfg.intermediate_size, vb.pp("fc1"))?; let fc2 = linear(cfg.intermediate_size, cfg.hidden_size, vb.pp("fc2"))?; Ok(Self { activation_fn: cfg.hidden_act, fc1, fc2, }) } } impl Module for MLP { fn forward(&self, xs: &Tensor) -> Result<Tensor> { xs.apply(&self.fc1)? .apply(&self.activation_fn)? .apply(&self.fc2) } } #[derive(Debug, Clone)] struct EncoderLayer { self_attn: Attention, layer_norm1: LayerNorm, mlp: MLP, layer_norm2: LayerNorm, } impl EncoderLayer { fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> { let embed_dim = cfg.hidden_size; let self_attn = Attention::new(cfg, vb.pp("self_attn"))?; let layer_norm1 = layer_norm(embed_dim, cfg.layer_norm_eps, vb.pp("layer_norm1"))?; let layer_norm2 = layer_norm(embed_dim, cfg.layer_norm_eps, vb.pp("layer_norm2"))?; let mlp = MLP::new(cfg, vb.pp("mlp"))?; Ok(Self { self_attn, layer_norm1, mlp, layer_norm2, }) } fn forward(&self, xs: &Tensor, attention_mask: Option<&Tensor>) -> Result<Tensor> { let residual = xs; let xs = xs.apply(&self.layer_norm1)?; let xs = self.self_attn.forward(&xs, attention_mask)?; let xs = (xs + residual)?; let residual = &xs; let xs = xs.apply(&self.layer_norm2)?.apply(&self.mlp)?; xs + residual } } #[derive(Debug, Clone)] struct Encoder { layers: Vec<EncoderLayer>, } impl Encoder { fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> { let mut layers = Vec::with_capacity(cfg.num_hidden_layers); let vb = vb.pp("layers"); for i in 0..cfg.num_hidden_layers { let layer = EncoderLayer::new(cfg, vb.pp(i))?; layers.push(layer) } Ok(Self { layers }) } fn forward(&self, xs: &Tensor, attention_mask: Option<&Tensor>) -> Result<Tensor> { let mut xs = xs.clone(); for layer in self.layers.iter() { xs = layer.forward(&xs, attention_mask)? 
} Ok(xs) } } #[derive(Debug, Clone)] pub struct VisionModel { embeddings: VisionEmbeddings, encoder: Encoder, post_layernorm: LayerNorm, } impl VisionModel { fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> { let embeddings = VisionEmbeddings::new(cfg, vb.pp("embeddings"))?; let encoder = Encoder::new(cfg, vb.pp("encoder"))?; let post_layernorm = layer_norm(cfg.hidden_size, cfg.layer_norm_eps, vb.pp("post_layernorm"))?; Ok(Self { embeddings, encoder, post_layernorm, }) } } impl Module for VisionModel { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let xs = xs.apply(&self.embeddings)?; let encoder_outputs = self.encoder.forward(&xs, None)?; // Return the last hidden state rather than pooled outputs. encoder_outputs.apply(&self.post_layernorm) } } #[derive(Debug, Clone)] pub struct BlipForConditionalGeneration { vision_model: VisionModel, text_decoder: blip_text::TextLMHeadModel, } impl BlipForConditionalGeneration { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let vision_model = VisionModel::new(&cfg.vision_config, vb.pp("vision_model"))?; let text_decoder = blip_text::TextLMHeadModel::new(&cfg.text_config, vb.pp("text_decoder"))?; Ok(Self { vision_model, text_decoder, }) } pub fn vision_model(&self) -> &VisionModel { &self.vision_model } pub fn text_decoder(&mut self) -> &mut blip_text::TextLMHeadModel { &mut self.text_decoder } pub fn reset_kv_cache(&mut self) { self.text_decoder.reset_kv_cache(); } }
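
// Usage sketch (comments only; the full generation loop lives with the caller):
// 1. Preprocess the image and run it through `vision_model()` -- it implements
//    `Module`, so `image.apply(model.vision_model())?` yields the image embeddings.
// 2. Feed those embeddings to `text_decoder()` and sample caption tokens one at a
//    time; the decoder keeps its own KV cache across steps.
// 3. Call `reset_kv_cache()` before captioning a new image.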
candle/candle-transformers/src/models/blip.rs/0
{ "file_path": "candle/candle-transformers/src/models/blip.rs", "repo_id": "candle", "token_count": 4762 }
//! Implementation of the DINOv2 models from Meta Research. //! //! This module implements the DINOv2 vision transformer model from Meta AI Research. //! DINOv2 is a self-supervised learning model that can learn visual features //! without using any labeled data. See: ["DINOv2: Learning Robust Visual Features without Supervision"](https://github.com/facebookresearch/dinov2) //! //! ## Running an example with color map and CUDA //! //! ```bash //! cargo run \ //! --features cuda,depth_anything_v2 \ //! --package candle-examples \ //! --example depth_anything_v2 \ //! -- --color-map \ //! --image candle-examples/examples/yolo-v8/assets/bike.jpg //! ``` //! //! ## Running as an ImageNet classifier //! //! The model returns the probability for the image to belong to each of the 1000 ImageNet categories. //! //! <div align=center> //! <img src="https://github.com/huggingface/candle/raw/main/candle-examples/examples/yolo-v8/assets/bike.jpg" alt="" width=640> //! </div> //! //! ```bash //! cargo run \ //! --example dinov2 \ //! --release \ //! -- --image candle-examples/examples/yolo-v8/assets/bike.jpg //! //! > mountain bike, all-terrain bike, off-roader: 43.67% //! > bicycle-built-for-two, tandem bicycle, tandem: 33.20% //! > crash helmet : 13.23% //! > unicycle, monocycle : 2.44% //! > maillot : 2.42% //! ``` //! use candle::{IndexOp, Result, Tensor, D}; use candle_nn::{layer_norm, LayerNorm, Linear, Module, VarBuilder}; const IMG_SIZE: usize = 518; const PATCH_SIZE: usize = 14; const NUM_CLASSES: usize = 1000; fn linear(vb: VarBuilder, in_dim: usize, out_dim: usize, bias: bool) -> Result<Linear> { if bias { candle_nn::linear(in_dim, out_dim, vb) } else { candle_nn::linear_no_bias(in_dim, out_dim, vb) } } #[derive(Debug)] struct Attention { qkv: Linear, proj: Linear, num_heads: usize, scale: f64, } impl Attention { fn new( vb: VarBuilder, dim: usize, num_heads: usize, qkv_bias: bool, proj_bias: bool, ) -> Result<Self> { let qkv = linear(vb.pp("qkv"), dim, dim * 3, qkv_bias)?; let proj = linear(vb.pp("proj"), dim, dim, proj_bias)?; let scale = 1. / ((dim / num_heads) as f64).sqrt(); Ok(Self { qkv, proj, num_heads, scale, }) } } impl Module for Attention { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let (b, n, c) = xs.dims3()?; let qkv = self .qkv .forward(xs)? .reshape((b, n, 3, self.num_heads, c / self.num_heads))? .transpose(1, 2)? // 02134 .transpose(0, 1)? // 20134 .transpose(2, 3)?; // 20314 let q = (qkv.i(0)? 
* self.scale)?; let k = qkv.i(1)?.contiguous()?; let v = qkv.i(2)?.contiguous()?; let attn = candle_nn::ops::softmax(&q.matmul(&k.t()?)?, D::Minus1)?; let attn = attn.matmul(&v)?.transpose(1, 2)?.reshape((b, n, c))?; self.proj.forward(&attn) } } #[derive(Debug)] struct LayerScale { gamma: Tensor, } impl LayerScale { fn new(vb: VarBuilder, dim: usize) -> Result<Self> { let gamma = vb.get(dim, "gamma")?; Ok(Self { gamma }) } } impl Module for LayerScale { fn forward(&self, xs: &Tensor) -> Result<Tensor> { xs.broadcast_mul(&self.gamma) } } #[derive(Debug)] struct Mlp { fc1: Linear, fc2: Linear, } impl Mlp { fn new(vb: VarBuilder, in_features: usize, hidden_features: usize, bias: bool) -> Result<Self> { let out_features = in_features; let fc1 = linear(vb.pp("fc1"), in_features, hidden_features, bias)?; let fc2 = linear(vb.pp("fc2"), hidden_features, out_features, bias)?; Ok(Self { fc1, fc2 }) } } impl Module for Mlp { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let xs = self.fc1.forward(xs)?.gelu()?; self.fc2.forward(&xs) } } #[derive(Debug)] struct Block { norm1: LayerNorm, attn: Attention, ls1: LayerScale, norm2: LayerNorm, mlp: Mlp, ls2: LayerScale, } impl Block { fn new(vb: VarBuilder, dim: usize, num_heads: usize) -> Result<Self> { let norm1 = layer_norm(dim, 1e-5, vb.pp("norm1"))?; let attn = Attention::new(vb.pp("attn"), dim, num_heads, true, true)?; let ls1 = LayerScale::new(vb.pp("ls1"), dim)?; let norm2 = layer_norm(dim, 1e-5, vb.pp("norm2"))?; let mlp = Mlp::new(vb.pp("mlp"), dim, dim * 4, true)?; let ls2 = LayerScale::new(vb.pp("ls2"), dim)?; Ok(Self { norm1, attn, ls1, norm2, mlp, ls2, }) } } impl Module for Block { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let residual = xs; let xs = self .ls1 .forward(&self.attn.forward(&self.norm1.forward(xs)?)?)?; let xs = (xs + residual)?; let residual = &xs; let xs = self .ls2 .forward(&self.mlp.forward(&self.norm2.forward(&xs)?)?)?; xs + residual } } #[derive(Debug)] struct PatchEmbed { proj: candle_nn::Conv2d, patch_size: (usize, usize), num_patches: usize, } impl PatchEmbed { fn new( vb: VarBuilder, img_size: usize, patch_size: usize, in_chans: usize, embed_dim: usize, ) -> Result<Self> { let config = candle_nn::Conv2dConfig { stride: patch_size, ..Default::default() }; let proj = candle_nn::conv2d(in_chans, embed_dim, patch_size, config, vb.pp("proj"))?; let num_patches = (img_size / patch_size) * (img_size / patch_size); Ok(Self { proj, patch_size: (patch_size, patch_size), num_patches, }) } } impl Module for PatchEmbed { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let (_b, _c, h, w) = xs.dims4()?; let (patch_h, patch_w) = self.patch_size; if (h % patch_h) != 0 { candle::bail!("image height {h} is not a multiple of patch height {patch_h}") } if (w % patch_w) != 0 { candle::bail!("image width {w} is not a multiple of patch width {patch_w}") } let xs = self.proj.forward(xs)?; let (b, c, h, w) = xs.dims4()?; // flatten embeddings. 
xs.reshape((b, c, h * w))?.transpose(1, 2) } } #[derive(Debug)] pub struct DinoVisionTransformer { patch_embed: PatchEmbed, cls_token: Tensor, pos_embed: Tensor, blocks: Vec<Block>, norm: LayerNorm, head: Linear, } impl DinoVisionTransformer { pub fn new(vb: VarBuilder, depth: usize, embed_dim: usize, num_heads: usize) -> Result<Self> { let patch_embed = PatchEmbed::new(vb.pp("patch_embed"), IMG_SIZE, PATCH_SIZE, 3, embed_dim)?; let cls_token = vb.get((1, 1, embed_dim), "cls_token")?; let num_tokens = 1; let pos_embed = vb.get( (1, patch_embed.num_patches + num_tokens, embed_dim), "pos_embed", )?; let head = linear(vb.pp("head"), 2 * embed_dim, NUM_CLASSES, true)?; let norm = layer_norm(embed_dim, 1e-5, vb.pp("norm"))?; let vb_b = vb.pp("blocks"); let blocks = (0..depth) .map(|i| Block::new(vb_b.pp(i.to_string()), embed_dim, num_heads)) .collect::<Result<Vec<_>>>()?; Ok(Self { patch_embed, cls_token, pos_embed, blocks, norm, head, }) } fn interpolate_pos_encoding(&self, xs: &Tensor, w: usize, h: usize) -> Result<Tensor> { let npatch = xs.dim(1)? - 1; let n = self.pos_embed.dim(1)? - 1; let sqrt_n = (n as f64).sqrt(); if npatch == n && w == h { return Ok(xs.clone()); } let class_pos_embed = self.pos_embed.i((.., ..1))?; let patch_pos_embed = self.pos_embed.i((.., 1..))?; let dim = xs.dim(D::Minus1)?; let (w0, h0) = ((w / PATCH_SIZE) as f64 + 0.1, (h / PATCH_SIZE) as f64 + 0.1); let patch_pos_embed = patch_pos_embed .reshape((1, sqrt_n as usize, sqrt_n as usize, dim))? .transpose(2, 3)? .transpose(1, 2)?; // This uses bicubic interpolation in the original implementation. let patch_pos_embed = patch_pos_embed.upsample_nearest2d(h0 as usize, w0 as usize)?; let el_count = patch_pos_embed.shape().elem_count(); let patch_pos_embed = patch_pos_embed .transpose(1, 2)? .transpose(2, 3)? .reshape((1, el_count / dim, dim))?; Tensor::cat(&[&class_pos_embed, &patch_pos_embed], 1) } fn prepare_tokens_with_mask(&self, xs: &Tensor) -> Result<Tensor> { let (_b, _nc, w, h) = xs.dims4()?; let xs = self.patch_embed.forward(xs)?; let xs = Tensor::cat(&[&self.cls_token, &xs], 1)?; &xs + &self.interpolate_pos_encoding(&xs, w, h)? } fn get_intermediate_layers_not_chunked( &self, xs: &Tensor, blocks_to_take: &[usize], ) -> Result<Vec<Tensor>> { let mut xs = self.prepare_tokens_with_mask(xs)?; let mut output = Vec::new(); for (i, blk) in self.blocks.iter().enumerate() { xs = blk.forward(&xs)?; if blocks_to_take.contains(&i) { output.push(xs.clone()); } } if output.len() != blocks_to_take.len() { candle::bail!( "only {} / {} blocks found", output.len(), blocks_to_take.len() ); } Ok(output) } pub fn get_intermediate_layers( &self, xs: &Tensor, blocks_to_take: &[usize], reshape: bool, return_class_token: bool, norm: bool, ) -> Result<Tensor> { let outputs = self.get_intermediate_layers_not_chunked(xs, blocks_to_take)?; let outputs = if norm { outputs .iter() .map(|out| self.norm.forward(out)) .collect::<Result<Vec<_>>>()? } else { outputs }; let class_tokens = outputs .iter() .map(|out| out.i((.., 0))) .collect::<Result<Vec<_>>>()?; let outputs = outputs .iter() .map(|out| out.i((.., 1..))) .collect::<Result<Vec<_>>>()?; let outputs = if reshape { let (b, _c, w, h) = xs.dims4()?; let patch_size = self.patch_embed.patch_size.0; let num_channels = outputs[0].elem_count() / (b * (w / patch_size) * (h / patch_size)); outputs .iter() .map(|out| { out.reshape((b, w / patch_size, h / patch_size, num_channels))? .transpose(2, 3)? .transpose(1, 2) }) .collect::<Result<Vec<_>>>()? 
} else { outputs }; let outputs = if return_class_token { outputs .iter() .zip(class_tokens.iter()) .map(|(out, class_token)| Tensor::cat(&[out, class_token], D::Minus1)) .collect::<Result<Vec<_>>>()? } else { outputs }; Tensor::stack(&outputs[..], 0) } } impl Module for DinoVisionTransformer { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let mut xs = self.prepare_tokens_with_mask(xs)?; for blk in self.blocks.iter() { xs = blk.forward(&xs)? } let xs = self.norm.forward(&xs)?; let xs_norm_clstoken = xs.i((.., 0))?; let xs_norm_patchtokens = xs.i((.., 1..))?.mean(1)?; let xs = Tensor::cat(&[xs_norm_clstoken, xs_norm_patchtokens], D::Minus1)?; self.head.forward(&xs) } } pub fn vit_small(vb: VarBuilder) -> Result<DinoVisionTransformer> { DinoVisionTransformer::new(vb, 12, 384, 6) }
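
// Note on the head dimensions: `DinoVisionTransformer::forward` concatenates the
// normalized CLS token with the mean of the normalized patch tokens, which is why
// the classification head is built with `2 * embed_dim` input features.
// `vit_small` corresponds to a ViT-S/14 backbone: depth 12, embedding dim 384 and
// 6 attention heads, i.e. IMG_SIZE / PATCH_SIZE = 518 / 14 = 37 patches per side.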
candle/candle-transformers/src/models/dinov2.rs/0
{ "file_path": "candle/candle-transformers/src/models/dinov2.rs", "repo_id": "candle", "token_count": 6312 }
//! GLM-4 inference implementation. //! //! An open bilingual language model with 130B parameters. //! //! Based on implementation from [ChatGLM-6B](https://github.com/THUDM/ChatGLM-6B) use crate::models::with_tracing::{linear_b as linear, Linear}; use candle::{DType, Device, IndexOp, Module, Result, Tensor, D}; use candle_nn::VarBuilder; fn default_one() -> usize { 1 } #[derive(Debug, Clone, serde::Deserialize, Default)] pub struct Config { pub num_layers: usize, pub padded_vocab_size: usize, pub hidden_size: usize, pub ffn_hidden_size: usize, pub kv_channels: usize, pub num_attention_heads: usize, pub seq_length: usize, pub layernorm_epsilon: f64, pub rmsnorm: bool, pub apply_residual_connection_post_layernorm: bool, pub post_layer_norm: bool, pub add_bias_linear: bool, pub add_qkv_bias: bool, pub bias_dropout_fusion: bool, pub multi_query_attention: bool, pub multi_query_group_num: usize, pub apply_query_key_layer_scaling: bool, pub attention_softmax_in_fp32: bool, pub fp32_residual_connection: bool, #[serde(default = "default_one")] pub rope_ratio: usize, } impl Config { pub fn glm4() -> Self { Self { num_layers: 40, padded_vocab_size: 151552, hidden_size: 4096, ffn_hidden_size: 13696, kv_channels: 128, num_attention_heads: 32, seq_length: 8192, layernorm_epsilon: 1e-5, rmsnorm: true, apply_residual_connection_post_layernorm: false, post_layer_norm: true, add_bias_linear: false, add_qkv_bias: true, bias_dropout_fusion: true, multi_query_attention: true, multi_query_group_num: 2, apply_query_key_layer_scaling: true, attention_softmax_in_fp32: true, fp32_residual_connection: false, rope_ratio: 500, } } } #[derive(Debug, Clone)] struct RotaryEmbedding { cache: Tensor, } impl RotaryEmbedding { fn new(cfg: &Config, dtype: DType, dev: &Device) -> Result<Self> { let rotary_dim = cfg.kv_channels; let n_elem = rotary_dim / 2; let base = 10_000f64 * cfg.rope_ratio as f64; let inv_freq: Vec<_> = (0..n_elem) .step_by(2) .map(|i| 1f32 / base.powf(i as f64 / n_elem as f64) as f32) .collect(); let inv_freq_len = inv_freq.len(); let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(dtype)?; let t = Tensor::arange(0u32, cfg.seq_length as u32, dev)? .to_dtype(dtype)? .reshape((cfg.seq_length, 1))?; let freqs = t.matmul(&inv_freq)?; let cache = Tensor::stack(&[&freqs.cos()?, &freqs.sin()?], D::Minus1)?; Ok(Self { cache }) } fn apply(&self, xs: &Tensor, seqlen_offset: usize) -> Result<Tensor> { let (seqlen, _b, np, _hn) = xs.dims4()?; let cache = self.cache.narrow(0, seqlen_offset, seqlen)?; let rot_dim = cache.dim(D::Minus2)? * 2; let (xs, xs_pass) = ( xs.narrow(D::Minus1, 0, rot_dim)?, xs.narrow(D::Minus1, rot_dim, rot_dim)?, ); let xshaped = xs.reshape((seqlen, (), np, rot_dim / 2, 2))?; let cache = cache.reshape((seqlen, (), 1, rot_dim / 2, 2))?; let (xshaped0, xshaped1) = ( xshaped.i((.., .., .., .., 0))?, xshaped.i((.., .., .., .., 1))?, ); let (cache0, cache1) = (cache.i((.., .., .., .., 0))?, cache.i((.., .., .., .., 1))?); let xs_out = Tensor::stack( &[ (xshaped0.broadcast_mul(&cache0)? - xshaped1.broadcast_mul(&cache1)?)?, (xshaped1.broadcast_mul(&cache0)? 
+ xshaped0.broadcast_mul(&cache1)?)?, ], D::Minus1, )?; let xs_out = xs_out.flatten_from(3)?; Tensor::cat(&[xs_out, xs_pass], D::Minus1) } } #[derive(Debug, Clone)] struct CoreAttention { coeff: Option<f64>, norm_factor: f64, dtype: DType, } fn masked_fill(on_false: &Tensor, mask: &Tensor, on_true: f32, dtype: DType) -> Result<Tensor> { let shape = mask.shape(); let on_true = Tensor::new(on_true, on_false.device())?.broadcast_as(shape.dims())?; let m = mask.where_cond(&on_true.to_dtype(dtype)?, on_false)?; Ok(m) } impl CoreAttention { fn new(layer_number: usize, cfg: &Config, dtype: DType) -> Result<Self> { let norm_factor = (cfg.kv_channels as f64).sqrt(); let (norm_factor, coeff) = if cfg.apply_query_key_layer_scaling { let coeff = f64::max(1.0, layer_number as f64); (norm_factor * coeff, Some(coeff)) } else { (norm_factor, None) }; Ok(Self { coeff, norm_factor, dtype, }) } fn forward( &self, query_layer: &Tensor, key_layer: &Tensor, value_layer: &Tensor, attention_mask: &Option<Tensor>, ) -> Result<Tensor> { let output_size = ( query_layer.dim(1)?, // b query_layer.dim(2)?, // np query_layer.dim(0)?, // sq key_layer.dim(0)?, // sk ); let query_layer = query_layer.reshape((output_size.2, output_size.0 * output_size.1, ()))?; let key_layer = key_layer.reshape((output_size.3, output_size.0 * output_size.1, ()))?; let matmul_result = Tensor::matmul( &query_layer.transpose(0, 1)?.contiguous()?, &key_layer.transpose(0, 1)?.transpose(1, 2)?.contiguous()?, )?; let matmul_result = (matmul_result / self.norm_factor)?.reshape(output_size)?; let matmul_result = match self.coeff { None => matmul_result, Some(coeff) => (matmul_result * coeff)?, }; let attention_scores = match attention_mask { Some(mask) => masked_fill( &matmul_result, &mask.broadcast_left((matmul_result.dim(0)?, matmul_result.dim(1)?))?, f32::NEG_INFINITY, self.dtype, )?, None => matmul_result, }; let attention_probs = candle_nn::ops::softmax_last_dim(&attention_scores)?; let output_size = ( value_layer.dim(1)?, value_layer.dim(2)?, query_layer.dim(0)?, value_layer.dim(3)?, ); let value_layer = value_layer.reshape((value_layer.dim(0)?, output_size.0 * output_size.1, ()))?; let attention_probs = attention_probs.reshape((output_size.0 * output_size.1, output_size.2, ()))?; let context_layer = Tensor::matmul( &attention_probs.contiguous()?, &value_layer.transpose(0, 1)?.contiguous()?, )?; let context_layer = context_layer.reshape(output_size)?; let context_layer = context_layer.permute((2, 0, 1, 3))?.contiguous()?; context_layer.flatten_from(D::Minus2) } } #[derive(Debug, Clone)] struct SelfAttention { query_key_value: Linear, core_attention: CoreAttention, dense: Linear, multi_query_attention: bool, num_attention_heads_per_partition: usize, num_multi_query_groups_per_partition: usize, hidden_size_per_attention_head: usize, kv_cache: Option<(Tensor, Tensor)>, } impl SelfAttention { fn new(layer_number: usize, cfg: &Config, vb: VarBuilder) -> Result<Self> { let projection_size = cfg.kv_channels * cfg.num_attention_heads; let hidden_size_per_attention_head = projection_size / cfg.num_attention_heads; let qkv_hidden_size = if cfg.multi_query_attention { projection_size + 2 * hidden_size_per_attention_head * cfg.multi_query_group_num } else { 3 * projection_size }; let query_key_value = linear( cfg.hidden_size, qkv_hidden_size, cfg.add_bias_linear || cfg.add_qkv_bias, vb.pp("query_key_value"), )?; let core_attention = CoreAttention::new(layer_number, cfg, vb.dtype())?; let dense = linear( cfg.hidden_size, cfg.hidden_size, 
cfg.add_bias_linear, vb.pp("dense"), )?; Ok(Self { query_key_value, core_attention, dense, multi_query_attention: cfg.multi_query_attention, num_attention_heads_per_partition: cfg.num_attention_heads, num_multi_query_groups_per_partition: cfg.multi_query_group_num, hidden_size_per_attention_head: cfg.kv_channels, kv_cache: None, }) } fn reset_kv_cache(&mut self) { self.kv_cache = None } fn forward( &mut self, xs: &Tensor, attention_mask: &Option<Tensor>, rotary_emb: &RotaryEmbedding, ) -> Result<Tensor> { let mixed_x_layer = xs.apply(&self.query_key_value)?; if !self.multi_query_attention { candle::bail!("only multi_query_attention=true is supported") } let hpa = self.hidden_size_per_attention_head; let query_layer = mixed_x_layer.narrow(D::Minus1, 0, self.num_attention_heads_per_partition * hpa)?; let key_layer = mixed_x_layer.narrow( D::Minus1, self.num_attention_heads_per_partition * hpa, self.num_multi_query_groups_per_partition * hpa, )?; let value_layer = mixed_x_layer.narrow( D::Minus1, self.num_attention_heads_per_partition * hpa + self.num_multi_query_groups_per_partition * hpa, self.num_multi_query_groups_per_partition * hpa, )?; let query_layer = query_layer.reshape(( query_layer.dim(0)?, query_layer.dim(1)?, self.num_attention_heads_per_partition, hpa, ))?; let key_layer = key_layer.reshape(( key_layer.dim(0)?, key_layer.dim(1)?, self.num_multi_query_groups_per_partition, hpa, ))?; let value_layer = value_layer.reshape(( value_layer.dim(0)?, value_layer.dim(1)?, self.num_multi_query_groups_per_partition, hpa, ))?; // Rotary embeddings. let seqlen_offset = match &self.kv_cache { None => 0, Some((prev_k, _)) => prev_k.dim(0)?, }; let query_layer = rotary_emb.apply(&query_layer, seqlen_offset)?; let key_layer = rotary_emb.apply(&key_layer, seqlen_offset)?; // KV cache. let (key_layer, value_layer) = match &self.kv_cache { None => (key_layer, value_layer), Some((prev_k, prev_v)) => { let k = Tensor::cat(&[prev_k, &key_layer], 0)?; let v = Tensor::cat(&[prev_v, &value_layer], 0)?; (k, v) } }; self.kv_cache = Some((key_layer.clone(), value_layer.clone())); // Repeat KV. let ratio = self.num_attention_heads_per_partition / self.num_multi_query_groups_per_partition; let key_layer = { let (d0, d1, d2, d3) = key_layer.dims4()?; key_layer .unsqueeze(D::Minus2)? .expand((d0, d1, d2, ratio, d3))? .reshape(( d0, d1, self.num_attention_heads_per_partition, self.hidden_size_per_attention_head, ))? }; let value_layer = { let (d0, d1, d2, d3) = value_layer.dims4()?; value_layer .unsqueeze(D::Minus2)? .expand((d0, d1, d2, ratio, d3))? .reshape(( d0, d1, self.num_attention_heads_per_partition, self.hidden_size_per_attention_head, ))? }; let context_layer = self.core_attention .forward(&query_layer, &key_layer, &value_layer, attention_mask)?; let output = context_layer.apply(&self.dense)?; Ok(output) } } #[allow(clippy::upper_case_acronyms)] #[derive(Debug, Clone)] struct MLP { dense_h_to_4h: Linear, dense_4h_to_h: Linear, } impl MLP { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let dense_h_to_4h = linear( cfg.hidden_size, cfg.ffn_hidden_size * 2, cfg.add_bias_linear, vb.pp("dense_h_to_4h"), )?; let dense_4h_to_h = linear( cfg.ffn_hidden_size, cfg.hidden_size, cfg.add_bias_linear, vb.pp("dense_4h_to_h"), )?; Ok(Self { dense_4h_to_h, dense_h_to_4h, }) } } impl Module for MLP { fn forward(&self, xs: &Tensor) -> Result<Tensor> { xs.apply(&self.dense_h_to_4h)? .apply(&candle_nn::Activation::Swiglu)? 
.apply(&self.dense_4h_to_h) } } #[derive(Debug, Clone)] struct Block { input_layernorm: candle_nn::LayerNorm, self_attention: SelfAttention, post_attention_layernorm: candle_nn::LayerNorm, mlp: MLP, apply_residual_connection_post_layernorm: bool, } impl Block { fn new(layer_number: usize, cfg: &Config, vb: VarBuilder) -> Result<Self> { let input_layernorm = if cfg.rmsnorm { candle_nn::rms_norm( cfg.hidden_size, cfg.layernorm_epsilon, vb.pp("input_layernorm"), )? .into_inner() } else { candle_nn::layer_norm( cfg.hidden_size, cfg.layernorm_epsilon, vb.pp("input_layernorm"), )? }; let post_attention_layernorm = if cfg.rmsnorm { candle_nn::rms_norm( cfg.hidden_size, cfg.layernorm_epsilon, vb.pp("post_attention_layernorm"), )? .into_inner() } else { candle_nn::layer_norm( cfg.hidden_size, cfg.layernorm_epsilon, vb.pp("post_attention_layernorm"), )? }; let self_attention = SelfAttention::new(layer_number, cfg, vb.pp("self_attention"))?; let mlp = MLP::new(cfg, vb.pp("mlp"))?; Ok(Self { input_layernorm, self_attention, post_attention_layernorm, mlp, apply_residual_connection_post_layernorm: cfg.apply_residual_connection_post_layernorm, }) } fn reset_kv_cache(&mut self) { self.self_attention.reset_kv_cache() } fn forward( &mut self, xs: &Tensor, attention_mask: &Option<Tensor>, rotary_emb: &RotaryEmbedding, ) -> Result<Tensor> { let layernorm_output = xs.apply(&self.input_layernorm)?; let attention_output = self.self_attention .forward(&layernorm_output, attention_mask, rotary_emb)?; let residual = if self.apply_residual_connection_post_layernorm { &layernorm_output } else { xs }; let layernorm_input = (residual + attention_output)?; let layernorm_output = layernorm_input.apply(&self.post_attention_layernorm)?; let mlp_output = layernorm_output.apply(&self.mlp)?; let residual = if self.apply_residual_connection_post_layernorm { &layernorm_output } else { &layernorm_input }; mlp_output + residual } } #[derive(Debug, Clone)] struct Transformer { layers: Vec<Block>, final_layernorm: Option<candle_nn::LayerNorm>, rotary_emb: RotaryEmbedding, } impl Transformer { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let vb_l = vb.pp("layers"); let mut layers = Vec::with_capacity(cfg.num_layers); for layer_index in 0..cfg.num_layers { let block = Block::new(layer_index + 1, cfg, vb_l.pp(layer_index))?; layers.push(block) } let final_layernorm = if cfg.post_layer_norm { let ln = if cfg.rmsnorm { candle_nn::rms_norm( cfg.hidden_size, cfg.layernorm_epsilon, vb.pp("final_layernorm"), )? .into_inner() } else { candle_nn::layer_norm( cfg.hidden_size, cfg.layernorm_epsilon, vb.pp("final_layernorm"), )? }; Some(ln) } else { None }; let rotary_emb = RotaryEmbedding::new(cfg, vb.dtype(), vb.device())?; Ok(Self { layers, final_layernorm, rotary_emb, }) } fn reset_kv_cache(&mut self) { for block in self.layers.iter_mut() { block.reset_kv_cache() } } fn forward(&mut self, xs: &Tensor, attention_mask: &Option<Tensor>) -> Result<Tensor> { let mut xs = xs.clone(); for block in self.layers.iter_mut() { xs = block.forward(&xs, attention_mask, &self.rotary_emb)? 
} match self.final_layernorm.as_ref() { None => Ok(xs), Some(ln) => xs.apply(ln), } } } #[derive(Debug, Clone)] struct Embedding { word_embeddings: candle_nn::Embedding, fp32_residual_connection: bool, } impl Embedding { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let word_embeddings = candle_nn::embedding( cfg.padded_vocab_size, cfg.hidden_size, vb.pp("word_embeddings"), )?; Ok(Self { word_embeddings, fp32_residual_connection: cfg.fp32_residual_connection, }) } } impl Module for Embedding { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let xs = self.word_embeddings.forward(xs)?.transpose(0, 1)?; // b,s,h -> s,b,h if self.fp32_residual_connection { xs.to_dtype(candle::DType::F32) } else { xs.contiguous() } } } #[derive(Debug, Clone)] pub struct Model { embedding: Embedding, encoder: Transformer, output_layer: Linear, } fn get_mask(size: usize, device: &Device) -> Result<Tensor> { let mask: Vec<_> = (0..size) .flat_map(|i| (0..size).map(move |j| u8::from(j > i))) .collect(); Tensor::from_slice(&mask, (size, size), device) } impl Model { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let vb = vb.pp("transformer"); let embedding = Embedding::new(cfg, vb.pp("embedding"))?; let encoder = Transformer::new(cfg, vb.pp("encoder"))?; let output_layer = linear( cfg.hidden_size, cfg.padded_vocab_size, false, vb.pp("output_layer"), )?; Ok(Self { embedding, encoder, output_layer, }) } pub fn reset_kv_cache(&mut self) { self.encoder.reset_kv_cache() } pub fn forward(&mut self, xs: &Tensor) -> Result<Tensor> { let (_b_size, seq_len) = xs.dims2()?; let input_embeds = xs.apply(&self.embedding)?; let attention_mask = if seq_len <= 1 { None } else { Some(get_mask(seq_len, xs.device())?) }; let xs = self.encoder.forward(&input_embeds, &attention_mask)?; let lm_logits = xs.i(seq_len - 1)?.apply(&self.output_layer)?; Ok(lm_logits) } }
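
// A minimal illustrative check of `get_mask` (sizes chosen arbitrarily): future
// positions (j > i) are marked with 1 and later filled with -inf by
// `masked_fill` inside `CoreAttention::forward`.
#[cfg(test)]
mod glm4_mask_example {
    use super::get_mask;
    use candle::{Device, Result};

    #[test]
    fn causal_mask_example() -> Result<()> {
        let mask = get_mask(3, &Device::Cpu)?;
        assert_eq!(
            mask.to_vec2::<u8>()?,
            vec![vec![0, 1, 1], vec![0, 0, 1], vec![0, 0, 0]]
        );
        Ok(())
    }
}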
candle/candle-transformers/src/models/glm4.rs/0
{ "file_path": "candle/candle-transformers/src/models/glm4.rs", "repo_id": "candle", "token_count": 10660 }
//! mimi model
//!
//! [Mimi](https://huggingface.co/kyutai/mimi) is a state-of-the-art audio
//! compression model using an encoder/decoder architecture with residual vector
//! quantization. The candle implementation supports streaming, meaning that it's
//! possible to encode or decode a stream of audio tokens on the fly to provide
//! low latency interaction with an audio model.
//!
//! - 🤗 [HuggingFace Model Card](https://huggingface.co/kyutai/mimi)
//! - 💻 [GitHub](https://github.com/kyutai-labs/moshi)
//!
//!
//! # Example
//! ```bash
//! # Generating some audio tokens from an audio file.
//! wget https://github.com/metavoiceio/metavoice-src/raw/main/assets/bria.mp3
//! cargo run --example mimi \
//!     --features mimi --release -- \
//!     audio-to-code bria.mp3 bria.safetensors
//!
//! # And decoding the audio tokens back into a sound file.
//! cargo run --example mimi \
//!     --features mimi --release -- \
//!     code-to-audio bria.safetensors bria.wav
//! ```

// Copyright (c) Kyutai, all rights reserved.
// This source code is licensed under the license found in the
// LICENSE file in the root directory of this source tree.

pub use candle;
pub use candle_nn;

pub mod conv;
pub mod encodec;
pub mod quantization;
pub mod seanet;
pub mod transformer;

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum NormType {
    RmsNorm,
    LayerNorm,
}

pub use encodec::{load, Config, Encodec as Model};
candle/candle-transformers/src/models/mimi/mod.rs/0
{ "file_path": "candle/candle-transformers/src/models/mimi/mod.rs", "repo_id": "candle", "token_count": 480 }
//! ModernBERT //! //! ModernBERT is a modernized bidirectional encoder-only Transformer model. //! - [Arxiv](https://arxiv.org/abs/2412.13663) "Smarter, Better, Faster, Longer: A Modern Bidirectional Encoder for Fast, Memory Efficient, and Long Context Finetuning and Inference" //! - Upstream [Github repo](https://github.com/AnswerDotAI/ModernBERT). //! - See modernbert in [candle-examples](https://github.com/huggingface/candle/tree/main/candle-examples/) for runnable code //! use candle::{DType, Device, Result, Tensor, D}; use candle_nn::{ embedding, layer_norm_no_bias, linear_no_bias, ops::softmax, Embedding, LayerNorm, Linear, Module, VarBuilder, }; use serde::Deserialize; use core::f32; use std::sync::Arc; #[derive(Debug, Clone, PartialEq, Deserialize)] pub struct Config { pub vocab_size: usize, pub hidden_size: usize, pub num_hidden_layers: usize, pub num_attention_heads: usize, pub intermediate_size: usize, pub max_position_embeddings: usize, pub layer_norm_eps: f64, pub pad_token_id: u32, pub global_attn_every_n_layers: usize, pub global_rope_theta: f64, pub local_attention: usize, pub local_rope_theta: f64, } #[derive(Debug, Clone)] struct RotaryEmbedding { sin: Tensor, cos: Tensor, } impl RotaryEmbedding { fn new(dtype: DType, config: &Config, rope_theta: f64, dev: &Device) -> Result<Self> { let dim = config.hidden_size / config.num_attention_heads; let inv_freq: Vec<_> = (0..dim) .step_by(2) .map(|i| 1f32 / rope_theta.powf(i as f64 / dim as f64) as f32) .collect(); let inv_freq_len = inv_freq.len(); let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(dtype)?; let max_seq_len = config.max_position_embeddings; let t = Tensor::arange(0u32, max_seq_len as u32, dev)? .to_dtype(dtype)? .reshape((max_seq_len, 1))?; let freqs = t.matmul(&inv_freq)?; Ok(Self { sin: freqs.sin()?, cos: freqs.cos()?, }) } fn apply_rotary_emb_qkv(&self, q: &Tensor, k: &Tensor) -> Result<(Tensor, Tensor)> { let q_embed = candle_nn::rotary_emb::rope(&q.contiguous()?, &self.cos, &self.sin)?; let k_embed = candle_nn::rotary_emb::rope(&k.contiguous()?, &self.cos, &self.sin)?; Ok((q_embed, k_embed)) } } #[derive(Clone)] struct ModernBertAttention { qkv: Linear, proj: Linear, num_attention_heads: usize, attention_head_size: usize, rotary_emb: Arc<RotaryEmbedding>, } impl ModernBertAttention { fn load(vb: VarBuilder, config: &Config, rotary_emb: Arc<RotaryEmbedding>) -> Result<Self> { let num_attention_heads = config.num_attention_heads; let attention_head_size = config.hidden_size / config.num_attention_heads; let qkv = linear_no_bias(config.hidden_size, config.hidden_size * 3, vb.pp("Wqkv"))?; let proj = linear_no_bias(config.hidden_size, config.hidden_size, vb.pp("Wo"))?; Ok(Self { qkv, proj, num_attention_heads, attention_head_size, rotary_emb, }) } fn forward(&self, hidden_states: &Tensor, attention_mask: &Tensor) -> Result<Tensor> { let xs = hidden_states.clone(); let (b, seq_len, d) = xs.dims3()?; let qkv = xs .apply(&self.qkv)? .reshape(( b, seq_len, 3, self.num_attention_heads, self.attention_head_size, ))? 
.permute((2, 0, 3, 1, 4))?; let q = qkv.get(0)?; let k = qkv.get(1)?; let v = qkv.get(2)?; let (q, k) = self.rotary_emb.apply_rotary_emb_qkv(&q, &k)?; let scale = (self.attention_head_size as f64).powf(-0.5); let q = (q * scale)?; let att = q.matmul(&k.transpose(D::Minus2, D::Minus1)?)?; let att = att.broadcast_add(attention_mask)?; let att = softmax(&att, D::Minus1)?; let xs = att.matmul(&v)?; let xs = xs.transpose(1, 2)?.reshape((b, seq_len, d))?; let xs = xs.apply(&self.proj)?; let xs = xs.reshape((b, seq_len, d))?; Ok(xs) } } #[derive(Clone)] pub struct ModernBertMLP { wi: Linear, wo: Linear, } impl ModernBertMLP { fn load(vb: VarBuilder, config: &Config) -> Result<Self> { let wi = linear_no_bias( config.hidden_size, config.intermediate_size * 2, vb.pp("Wi"), )?; let wo = linear_no_bias(config.intermediate_size, config.hidden_size, vb.pp("Wo"))?; Ok(Self { wi, wo }) } } impl Module for ModernBertMLP { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let xs = xs.apply(&self.wi)?; let xs = xs.chunk(2, D::Minus1)?; let xs = (&xs[0].gelu_erf()? * &xs[1])?.apply(&self.wo)?; // GeGLU Ok(xs) } } #[derive(Clone)] pub struct ModernBertLayer { attn: ModernBertAttention, mlp: ModernBertMLP, attn_norm: Option<LayerNorm>, mlp_norm: LayerNorm, uses_local_attention: bool, } impl ModernBertLayer { fn load( vb: VarBuilder, config: &Config, rotary_emb: Arc<RotaryEmbedding>, uses_local_attention: bool, ) -> Result<Self> { let attn = ModernBertAttention::load(vb.pp("attn"), config, rotary_emb)?; let mlp = ModernBertMLP::load(vb.pp("mlp"), config)?; let attn_norm = layer_norm_no_bias( config.hidden_size, config.layer_norm_eps, vb.pp("attn_norm"), ) .ok(); let mlp_norm = layer_norm_no_bias(config.hidden_size, config.layer_norm_eps, vb.pp("mlp_norm"))?; Ok(Self { attn, mlp, attn_norm, mlp_norm, uses_local_attention, }) } fn forward( &self, xs: &Tensor, global_attention_mask: &Tensor, local_attention_mask: &Tensor, ) -> Result<Tensor> { let residual = xs.clone(); let mut xs = xs.clone(); if let Some(norm) = &self.attn_norm { xs = xs.apply(norm)?; } let attention_mask = if self.uses_local_attention { &global_attention_mask.broadcast_add(local_attention_mask)? 
} else { global_attention_mask }; let xs = self.attn.forward(&xs, attention_mask)?; let xs = (xs + residual)?; let mlp_out = xs.apply(&self.mlp_norm)?.apply(&self.mlp)?; let xs = (xs + mlp_out)?; Ok(xs) } } #[derive(Clone)] pub struct ModernBertHead { dense: Linear, norm: LayerNorm, } impl ModernBertHead { fn load(vb: VarBuilder, config: &Config) -> Result<Self> { let dense = linear_no_bias(config.hidden_size, config.hidden_size, vb.pp("dense"))?; let norm = layer_norm_no_bias(config.hidden_size, config.layer_norm_eps, vb.pp("norm"))?; Ok(Self { dense, norm }) } } impl Module for ModernBertHead { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let xs = xs.apply(&self.dense)?.gelu_erf()?.apply(&self.norm)?; Ok(xs) } } #[derive(Clone)] pub struct ModernBertDecoder { decoder: Linear, } impl ModernBertDecoder { fn load(vb: VarBuilder, config: &Config) -> Result<Self> { // The decoder weights are tied with the embeddings layer weights let decoder_weights = vb.get( (config.vocab_size, config.hidden_size), "model.embeddings.tok_embeddings.weight", )?; let decoder_bias = vb.get(config.vocab_size, "decoder.bias")?; let decoder = Linear::new(decoder_weights, Some(decoder_bias)); Ok(Self { decoder }) } } impl Module for ModernBertDecoder { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let xs = xs.apply(&self.decoder)?; Ok(xs) } } // Global attention mask calculated from padded token inputs fn prepare_4d_attention_mask( mask: &Tensor, dtype: DType, tgt_len: Option<usize>, ) -> Result<Tensor> { let bsz = mask.dim(0)?; let src_len = mask.dim(1)?; let tgt_len = tgt_len.unwrap_or(src_len); let expanded_mask = mask .unsqueeze(1)? .unsqueeze(2)? .expand((bsz, 1, tgt_len, src_len))? .to_dtype(dtype)?; let inverted_mask = (1.0 - expanded_mask)?; (inverted_mask * f32::MIN as f64)?.to_dtype(dtype) } // Attention mask caused by the sliding window fn get_local_attention_mask( seq_len: usize, max_distance: usize, device: &Device, ) -> Result<Tensor> { let mask: Vec<_> = (0..seq_len) .flat_map(|i| { (0..seq_len).map(move |j| { if (j as i32 - i as i32).abs() > max_distance as i32 { f32::NEG_INFINITY } else { 0. 
} }) }) .collect(); Tensor::from_slice(&mask, (seq_len, seq_len), device) } // ModernBERT backbone #[derive(Clone)] pub struct ModernBert { word_embeddings: Embedding, norm: LayerNorm, layers: Vec<ModernBertLayer>, final_norm: LayerNorm, head: ModernBertHead, local_attention_size: usize, } impl ModernBert { fn load(vb: VarBuilder, config: &Config) -> Result<Self> { let word_embeddings = embedding( config.vocab_size, config.hidden_size, vb.pp("model.embeddings.tok_embeddings"), )?; let norm = layer_norm_no_bias( config.hidden_size, config.layer_norm_eps, vb.pp("model.embeddings.norm"), )?; let global_rotary_emb = Arc::new(RotaryEmbedding::new( vb.dtype(), config, config.global_rope_theta, vb.device(), )?); let local_rotary_emb = Arc::new(RotaryEmbedding::new( vb.dtype(), config, config.local_rope_theta, vb.device(), )?); let mut layers = Vec::with_capacity(config.num_hidden_layers); for layer_id in 0..config.num_hidden_layers { let layer_uses_local_attention = layer_id % config.global_attn_every_n_layers != 0; layers.push(ModernBertLayer::load( vb.pp(format!("model.layers.{layer_id}")), config, if layer_uses_local_attention { local_rotary_emb.clone() } else { global_rotary_emb.clone() }, layer_uses_local_attention, )?); } let final_norm = layer_norm_no_bias( config.hidden_size, config.layer_norm_eps, vb.pp("model.final_norm"), )?; let head = ModernBertHead::load(vb.pp("head"), config)?; Ok(Self { word_embeddings, norm, layers, final_norm, head, local_attention_size: config.local_attention, }) } fn forward(&self, xs: &Tensor, mask: &Tensor) -> Result<Tensor> { let seq_len = xs.shape().dims()[1]; let global_attention_mask = prepare_4d_attention_mask(mask, DType::F32, None)?.to_device(xs.device())?; let local_attention_mask = get_local_attention_mask(seq_len, self.local_attention_size / 2, xs.device())?; let mut xs = xs.apply(&self.word_embeddings)?.apply(&self.norm)?; for layer in self.layers.iter() { xs = layer.forward(&xs, &global_attention_mask, &local_attention_mask)?; } let xs = xs.apply(&self.final_norm)?.apply(&self.head)?; Ok(xs) } } // ModernBERT for the fill-mask task #[derive(Clone)] pub struct ModernBertForMaskedLM { model: ModernBert, decoder: ModernBertDecoder, } impl ModernBertForMaskedLM { pub fn load(vb: VarBuilder, config: &Config) -> Result<Self> { let model = ModernBert::load(vb.clone(), config)?; let decoder = ModernBertDecoder::load(vb.clone(), config)?; Ok(Self { model, decoder }) } pub fn forward(&self, xs: &Tensor, mask: &Tensor) -> Result<Tensor> { let xs = self.model.forward(xs, mask)?.apply(&self.decoder)?; Ok(xs) } }
candle/candle-transformers/src/models/modernbert.rs/0
{ "file_path": "candle/candle-transformers/src/models/modernbert.rs", "repo_id": "candle", "token_count": 6231 }
use candle::{DType, Device, Module, Result, Tensor, D}; use candle_nn::{linear_b, rms_norm, Linear, RmsNorm, VarBuilder}; fn default_act() -> candle_nn::Activation { candle_nn::Activation::Silu } fn default_hidden_size() -> usize { 1024 } fn default_intermediate_size() -> usize { 4096 } fn default_num_channels() -> usize { 3 } fn default_num_hidden_layers() -> usize { 24 } fn default_num_attention_heads() -> usize { 16 } #[derive(serde::Deserialize, Debug, Clone)] pub struct Config { #[serde(default = "default_hidden_size")] pub hidden_size: usize, #[serde(default = "default_num_channels")] pub num_channels: usize, pub image_size: usize, pub patch_size: usize, pub rope_theta: f64, #[serde(default = "default_intermediate_size")] pub intermediate_size: usize, #[serde(default = "default_num_hidden_layers")] pub num_hidden_layers: usize, pub head_dim: Option<usize>, #[serde(default = "default_num_attention_heads")] pub num_attention_heads: usize, #[serde(default = "default_act")] pub hidden_act: candle_nn::Activation, } impl Config { pub fn pixtral_12b_2409() -> Self { Self { hidden_size: 1024, num_channels: 3, image_size: 1024, patch_size: 16, rope_theta: 10000.0, intermediate_size: 4096, num_hidden_layers: 24, num_attention_heads: 16, head_dim: None, // Default hidden_act: candle_nn::Activation::Silu, } } fn head_dim(&self) -> usize { self.head_dim .unwrap_or(self.hidden_size / self.num_attention_heads) } } #[derive(Debug, Clone)] struct Attention { q_proj: Linear, k_proj: Linear, v_proj: Linear, o_proj: Linear, scale: f64, num_heads: usize, head_dim: usize, } impl Attention { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let h = cfg.hidden_size; let num_heads = cfg.num_attention_heads; let head_dim = cfg.head_dim(); let q_proj = linear_b(h, h, false, vb.pp("q_proj"))?; let k_proj = linear_b(h, h, false, vb.pp("k_proj"))?; let v_proj = linear_b(h, h, false, vb.pp("v_proj"))?; let o_proj = linear_b(h, h, false, vb.pp("o_proj"))?; let scale = (head_dim as f64).powf(-0.5); Ok(Self { q_proj, k_proj, v_proj, o_proj, scale, num_heads, head_dim, }) } fn forward( &self, xs: &Tensor, emb: &RotaryEmbedding, subsampled_positions: Option<&Tensor>, attention_mask: Option<&Tensor>, ) -> Result<Tensor> { let (b, patches, _) = xs.dims3()?; let query_states = xs.apply(&self.q_proj)?; let key_states = xs.apply(&self.k_proj)?; let value_states = xs.apply(&self.v_proj)?; let shape = (b, patches, self.num_heads, self.head_dim); let query_states = query_states.reshape(shape)?.transpose(1, 2)?.contiguous()?; let key_states = key_states.reshape(shape)?.transpose(1, 2)?.contiguous()?; let value_states = value_states.reshape(shape)?.transpose(1, 2)?.contiguous()?; let (query_states, key_states) = emb.apply_rotary_emb_qkv(&query_states, &key_states, subsampled_positions)?; let attn_weights = (query_states.matmul(&key_states.t()?)? * self.scale)?; let attn_weights = match attention_mask { None => attn_weights, Some(mask) => attn_weights.broadcast_add(mask)?, }; let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?; attn_weights .matmul(&value_states)? .transpose(1, 2)? .reshape((b, patches, ()))? 
.apply(&self.o_proj) } } #[derive(Debug, Clone)] struct Mlp { gate_proj: Linear, up_proj: Linear, down_proj: Linear, act_fn: candle_nn::Activation, } impl Mlp { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let (h, i) = (cfg.hidden_size, cfg.intermediate_size); let gate_proj = linear_b(h, i, false, vb.pp("gate_proj"))?; let up_proj = linear_b(h, i, false, vb.pp("up_proj"))?; let down_proj = linear_b(i, h, false, vb.pp("down_proj"))?; Ok(Self { gate_proj, up_proj, down_proj, act_fn: cfg.hidden_act, }) } } impl Module for Mlp { fn forward(&self, xs: &Tensor) -> Result<Tensor> { (xs.apply(&self.gate_proj)?.apply(&self.act_fn)? * xs.apply(&self.up_proj))? .apply(&self.down_proj) } } #[derive(Debug, Clone)] struct AttentionLayer { attention_norm: RmsNorm, feed_forward: Mlp, attention: Attention, ffn_norm: RmsNorm, } impl AttentionLayer { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let attention_norm = rms_norm(cfg.hidden_size, 1e-5, vb.pp("attention_norm"))?; let feed_forward = Mlp::new(cfg, vb.pp("feed_forward"))?; let attention = Attention::new(cfg, vb.pp("attention"))?; let ffn_norm = rms_norm(cfg.hidden_size, 1e-5, vb.pp("ffn_norm"))?; Ok(Self { attention_norm, feed_forward, attention, ffn_norm, }) } fn forward( &self, xs: &Tensor, emb: &RotaryEmbedding, subsampled_positions: Option<&Tensor>, attention_mask: Option<&Tensor>, ) -> Result<Tensor> { let residual = xs; let xs = self.attention.forward( &xs.apply(&self.attention_norm)?, emb, subsampled_positions, attention_mask, )?; let xs = (residual + xs)?; let residual = &xs; let xs = xs.apply(&self.ffn_norm)?.apply(&self.feed_forward)?; xs + residual } } #[derive(Debug, Clone)] struct Transformer { layers: Vec<AttentionLayer>, } impl Transformer { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let mut layers = Vec::with_capacity(cfg.num_hidden_layers); let vb = vb.pp("layers"); for layer_idx in 0..cfg.num_hidden_layers { let layer = AttentionLayer::new(cfg, vb.pp(layer_idx))?; layers.push(layer) } Ok(Self { layers }) } fn forward( &self, xs: &Tensor, emb: &RotaryEmbedding, subsampled_positions: Option<&Tensor>, attention_mask: Option<&Tensor>, ) -> Result<Tensor> { let mut xs = xs.clone(); for layer in self.layers.iter() { xs = layer.forward(&xs, emb, subsampled_positions, attention_mask)? } Ok(xs) } } #[derive(Debug, Clone)] struct RotaryEmbedding { cos: Tensor, sin: Tensor, } impl RotaryEmbedding { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let dtype = vb.dtype(); let dev = vb.device(); let dim = cfg.head_dim(); let rope_theta = cfg.rope_theta as f32; let max_patches_per_side = cfg.image_size / cfg.patch_size; let freqs: Vec<_> = (0..dim) .step_by(2) .map(|i| 1f32 / rope_theta.powf(i as f32 / dim as f32)) .collect(); let freqs_h = freqs.iter().step_by(2).copied().collect::<Vec<_>>(); let freqs_h = Tensor::new(freqs_h, dev)?; let freqs_w = freqs.iter().skip(1).step_by(2).copied().collect::<Vec<_>>(); let freqs_w = Tensor::new(freqs_w, dev)?; let h = Tensor::arange(0u32, max_patches_per_side as u32, dev)?.to_dtype(DType::F32)?; let w = Tensor::arange(0u32, max_patches_per_side as u32, dev)?.to_dtype(DType::F32)?; let freqs_h = h.unsqueeze(1)?.matmul(&freqs_h.unsqueeze(0)?)?; let freqs_w = w.unsqueeze(1)?.matmul(&freqs_w.unsqueeze(0)?)?; let inv_freq = Tensor::cat( &[ freqs_h.unsqueeze(1)?.repeat((1, max_patches_per_side, 1))?, freqs_w.unsqueeze(0)?.repeat((max_patches_per_side, 1, 1))?, ], D::Minus1, )? 
.reshape(((), dim / 2))?; let cos = inv_freq.cos()?.to_dtype(dtype)?; let sin = inv_freq.sin()?.to_dtype(dtype)?; Ok(Self { cos, sin }) } fn apply_rotary_emb_qkv( &self, q: &Tensor, k: &Tensor, subsampled_positions: Option<&Tensor>, ) -> Result<(Tensor, Tensor)> { let (_b_sz, _h, _seq_len, _n_embd) = q.dims4()?; let (cos, sin) = match subsampled_positions { None => (&self.cos, &self.sin), Some(pos) => ( &self.cos.index_select(pos, 0)?, &self.sin.index_select(pos, 0)?, ), }; let q_embed = candle_nn::rotary_emb::rope(q, cos, sin)?; let k_embed = candle_nn::rotary_emb::rope(k, cos, sin)?; Ok((q_embed, k_embed)) } } #[derive(Debug, Clone)] pub struct Model { patch_conv: candle_nn::Conv2d, ln_pre: RmsNorm, transformer: Transformer, patch_positional_embedding: RotaryEmbedding, max_image_width: u32, } impl Model { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let conv2d_cfg = candle_nn::Conv2dConfig { stride: cfg.patch_size, ..Default::default() }; let patch_conv = candle_nn::conv2d_no_bias( cfg.num_channels, cfg.hidden_size, cfg.patch_size, conv2d_cfg, vb.pp("patch_conv"), )?; let ln_pre = candle_nn::rms_norm(cfg.hidden_size, 1e-5, vb.pp("ln_pre"))?; let transformer = Transformer::new(cfg, vb.pp("transformer"))?; let patch_positional_embedding = RotaryEmbedding::new(cfg, vb.pp("patch_positional_embedding"))?; let max_image_width = (cfg.image_size / cfg.patch_size) as u32; Ok(Self { patch_conv, ln_pre, transformer, patch_positional_embedding, max_image_width, }) } pub fn position_ids_in_meshgrid( &self, num_patches_h: usize, num_patches_w: usize, device: &Device, ) -> Result<Tensor> { let idx = Tensor::arange(0, num_patches_h as u32, device)?; let idy = Tensor::arange(0, num_patches_w as u32, device)?; let mesh = Tensor::meshgrid(&[idx, idy], false)?; let ids = (&mesh[0] * (self.max_image_width as f64) + &mesh[1])?.flatten_all()?; Ok(ids) } } impl Module for Model { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let patch_embeds = xs.apply(&self.patch_conv)?; let subsampled_positions = Some(self.position_ids_in_meshgrid( patch_embeds.dim(2)?, patch_embeds.dim(3)?, patch_embeds.device(), )?); let patch_embeds = patch_embeds.flatten_from(2)?.t()?.apply(&self.ln_pre)?; self.transformer.forward( &patch_embeds, &self.patch_positional_embedding, subsampled_positions.as_ref(), None, ) } }
candle/candle-transformers/src/models/pixtral/vision_model.rs/0
{ "file_path": "candle/candle-transformers/src/models/pixtral/vision_model.rs", "repo_id": "candle", "token_count": 5788 }
//! Module for quantized StableLM implementation. //! //! StableLM is a series of open-source large language models //! optimized for performance and stability. This implementation //! provides quantization support for efficient model deployment. //! //! Key characteristics: //! - RMSNorm for layer normalization //! - Rotary positional embeddings (RoPE) //! - Support for 8-bit quantization //! //! References: //! - [StableLM](https://github.com/Stability-AI/StableLM) //! use crate::quantized_nn::{layer_norm, linear, linear_no_bias, Embedding, Linear}; pub use crate::quantized_var_builder::VarBuilder; use candle::{DType, Device, Module, Result, Tensor, D}; use candle_nn::{Activation, LayerNorm}; use std::sync::Arc; pub use crate::models::stable_lm::Config; use crate::models::stable_lm::RotaryEmbedding; #[derive(Debug, Clone)] #[allow(clippy::upper_case_acronyms)] struct MLP { gate_proj: Linear, up_proj: Linear, down_proj: Linear, act_fn: Activation, span: tracing::Span, } impl MLP { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let hidden_sz = cfg.hidden_size; let intermediate_sz = cfg.intermediate_size; let gate_proj = linear_no_bias(hidden_sz, intermediate_sz, vb.pp("gate_proj"))?; let up_proj = linear_no_bias(hidden_sz, intermediate_sz, vb.pp("up_proj"))?; let down_proj = linear_no_bias(intermediate_sz, hidden_sz, vb.pp("down_proj"))?; Ok(Self { gate_proj, up_proj, down_proj, act_fn: cfg.hidden_act, span: tracing::span!(tracing::Level::TRACE, "mlp"), }) } } impl Module for MLP { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let lhs = xs.apply(&self.gate_proj)?.apply(&self.act_fn)?; let rhs = xs.apply(&self.up_proj)?; (lhs * rhs)?.apply(&self.down_proj) } } #[derive(Debug, Clone)] struct Attention { q_proj: Linear, k_proj: Linear, v_proj: Linear, o_proj: Linear, num_heads: usize, num_kv_heads: usize, num_kv_groups: usize, head_dim: usize, hidden_size: usize, rotary_emb: Arc<RotaryEmbedding>, kv_cache: Option<(Tensor, Tensor)>, use_cache: bool, rotary_ndims: usize, span: tracing::Span, } impl Attention { fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> { let hidden_sz = cfg.hidden_size; let head_dim = cfg.head_dim(); let num_heads = cfg.num_attention_heads; let num_kv_heads = cfg.num_key_value_heads; let linear_layer = if cfg.use_qkv_bias { linear } else { linear_no_bias }; let q_proj = linear_layer(hidden_sz, num_heads * head_dim, vb.pp("q_proj"))?; let k_proj = linear_layer(hidden_sz, num_kv_heads * head_dim, vb.pp("k_proj"))?; let v_proj = linear_layer(hidden_sz, num_kv_heads * head_dim, vb.pp("v_proj"))?; let o_proj = linear_no_bias(num_heads * head_dim, hidden_sz, vb.pp("o_proj"))?; Ok(Self { q_proj, k_proj, v_proj, o_proj, num_heads, num_kv_heads, num_kv_groups: cfg.num_kv_groups(), head_dim, hidden_size: hidden_sz, rotary_emb, kv_cache: None, use_cache: cfg.use_cache, rotary_ndims: cfg.rotary_ndims(), span: tracing::span!(tracing::Level::TRACE, "attn"), }) } fn forward( &mut self, xs: &Tensor, attention_mask: Option<&Tensor>, seqlen_offset: usize, ) -> Result<Tensor> { let _enter = self.span.enter(); let (b_sz, q_len, _) = xs.dims3()?; let query_states = self.q_proj.forward(xs)?; let key_states = self.k_proj.forward(xs)?; let value_states = self.v_proj.forward(xs)?; let query_states = query_states .reshape((b_sz, q_len, self.num_heads, self.head_dim))? .transpose(1, 2)?; let key_states = key_states .reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))? 
.transpose(1, 2)?; let value_states = value_states .reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))? .transpose(1, 2)?; let (rot_ndims, pass_ndims) = (self.rotary_ndims, self.head_dim - self.rotary_ndims); let query_rot = query_states.narrow(D::Minus1, 0, rot_ndims)?; let query_pass = query_states.narrow(D::Minus1, rot_ndims, pass_ndims)?; let key_rot = key_states.narrow(D::Minus1, 0, rot_ndims)?; let key_pass = key_states.narrow(D::Minus1, rot_ndims, pass_ndims)?; let (query_rot, key_rot) = self.rotary_emb .apply_rotary_emb_qkv(&query_rot, &key_rot, seqlen_offset)?; let query_states = Tensor::cat(&[query_rot, query_pass], D::Minus1)?.contiguous()?; let key_states = Tensor::cat(&[key_rot, key_pass], D::Minus1)?.contiguous()?; let (key_states, value_states) = match &self.kv_cache { None => (key_states, value_states), Some((prev_k, prev_v)) => { let key_states = Tensor::cat(&[prev_k, &key_states], 2)?; let value_states = Tensor::cat(&[prev_v, &value_states], 2)?; (key_states, value_states) } }; if self.use_cache { self.kv_cache = Some((key_states.clone(), value_states.clone())); } let key_states = crate::utils::repeat_kv(key_states, self.num_kv_groups)?.contiguous()?; let value_states = crate::utils::repeat_kv(value_states, self.num_kv_groups)?.contiguous()?; let attn_output = { let scale = 1f64 / f64::sqrt(self.head_dim as f64); let attn_weights = (query_states.matmul(&key_states.transpose(2, 3)?)? * scale)?; let attn_weights = match attention_mask { None => attn_weights, Some(mask) => attn_weights.broadcast_add(mask)?, }; let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?; attn_weights.matmul(&value_states)? }; attn_output .transpose(1, 2)? .reshape((b_sz, q_len, self.hidden_size))? .apply(&self.o_proj) } } #[derive(Debug, Clone)] struct DecoderLayer { self_attn: Attention, mlp: MLP, input_layernorm: LayerNorm, post_attention_layernorm: LayerNorm, span: tracing::Span, } impl DecoderLayer { fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> { let self_attn = Attention::new(rotary_emb, cfg, vb.pp("self_attn"))?; let mlp = MLP::new(cfg, vb.pp("mlp"))?; let input_layernorm = layer_norm( cfg.hidden_size, cfg.layer_norm_eps, vb.pp("input_layernorm"), )?; let post_attention_layernorm = layer_norm( cfg.hidden_size, cfg.layer_norm_eps, vb.pp("post_attention_layernorm"), )?; Ok(Self { self_attn, mlp, input_layernorm, post_attention_layernorm, span: tracing::span!(tracing::Level::TRACE, "layer"), }) } fn forward( &mut self, xs: &Tensor, attention_mask: Option<&Tensor>, seqlen_offset: usize, ) -> Result<Tensor> { let _enter = self.span.enter(); let residual = xs; let xs = self.input_layernorm.forward(xs)?; let xs = self.self_attn.forward(&xs, attention_mask, seqlen_offset)?; let xs = (xs + residual)?; let residual = &xs; let xs = xs.apply(&self.post_attention_layernorm)?.apply(&self.mlp)?; residual + xs } } #[derive(Debug, Clone)] pub struct Model { embed_tokens: Embedding, layers: Vec<DecoderLayer>, norm: LayerNorm, lm_head: Linear, device: Device, span: tracing::Span, } impl Model { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let vb_m = vb.pp("model"); let embed_tokens = Embedding::new(cfg.vocab_size, cfg.hidden_size, vb_m.pp("embed_tokens"))?; let rotary_emb = Arc::new(RotaryEmbedding::new(DType::F32, cfg, vb_m.device())?); let mut layers = Vec::with_capacity(cfg.num_hidden_layers); let vb_l = vb_m.pp("layers"); for layer_idx in 0..cfg.num_hidden_layers { let layer = DecoderLayer::new(rotary_emb.clone(), cfg, 
vb_l.pp(layer_idx))?; layers.push(layer) } let norm = layer_norm(cfg.hidden_size, cfg.layer_norm_eps, vb_m.pp("norm"))?; let lm_head = linear_no_bias(cfg.hidden_size, cfg.vocab_size, vb.pp("lm_head"))?; Ok(Self { embed_tokens, layers, norm, lm_head, device: vb.device().clone(), span: tracing::span!(tracing::Level::TRACE, "model"), }) } fn prepare_decoder_attention_mask( &self, b_size: usize, tgt_len: usize, seqlen_offset: usize, ) -> Result<Tensor> { // Sliding window mask? let mask: Vec<_> = (0..tgt_len) .flat_map(|i| (0..tgt_len).map(move |j| if i < j { f32::NEG_INFINITY } else { 0. })) .collect(); let mask = Tensor::from_slice(&mask, (tgt_len, tgt_len), &self.device)?; let mask = if seqlen_offset > 0 { let mask0 = Tensor::zeros((tgt_len, seqlen_offset), DType::F32, &self.device)?; Tensor::cat(&[&mask0, &mask], D::Minus1)? } else { mask }; mask.expand((b_size, 1, tgt_len, tgt_len + seqlen_offset))? .to_dtype(DType::F32) } pub fn forward(&mut self, input_ids: &Tensor, seqlen_offset: usize) -> Result<Tensor> { let _enter = self.span.enter(); let (b_size, seq_len) = input_ids.dims2()?; let attention_mask = if seq_len <= 1 { None } else { let mask = self.prepare_decoder_attention_mask(b_size, seq_len, seqlen_offset)?; Some(mask) }; let mut xs = self.embed_tokens.forward(input_ids)?; for layer in self.layers.iter_mut() { xs = layer.forward(&xs, attention_mask.as_ref(), seqlen_offset)? } xs.narrow(1, seq_len - 1, 1)? .apply(&self.norm)? .apply(&self.lm_head) } }
candle/candle-transformers/src/models/quantized_stable_lm.rs/0
{ "file_path": "candle/candle-transformers/src/models/quantized_stable_lm.rs", "repo_id": "candle", "token_count": 5352 }
use candle::{Result, Tensor}; use candle_nn::{layer_norm, LayerNorm, Linear, Module, VarBuilder}; #[derive(Debug)] struct Attention { q_proj: Linear, k_proj: Linear, v_proj: Linear, out_proj: Linear, num_heads: usize, } impl Attention { fn new( embedding_dim: usize, num_heads: usize, downsample_rate: usize, vb: VarBuilder, ) -> Result<Self> { let internal_dim = embedding_dim / downsample_rate; let q_proj = candle_nn::linear(embedding_dim, internal_dim, vb.pp("q_proj"))?; let k_proj = candle_nn::linear(embedding_dim, internal_dim, vb.pp("k_proj"))?; let v_proj = candle_nn::linear(embedding_dim, internal_dim, vb.pp("v_proj"))?; let out_proj = candle_nn::linear(internal_dim, embedding_dim, vb.pp("out_proj"))?; Ok(Self { q_proj, k_proj, v_proj, out_proj, num_heads, }) } fn separate_heads(&self, x: &Tensor) -> Result<Tensor> { let (b, n, c) = x.dims3()?; x.reshape((b, n, self.num_heads, c / self.num_heads))? .transpose(1, 2)? .contiguous() } fn recombine_heads(&self, x: &Tensor) -> Result<Tensor> { let (b, n_heads, n_tokens, c_per_head) = x.dims4()?; x.transpose(1, 2)? .reshape((b, n_tokens, n_heads * c_per_head)) } fn forward(&self, q: &Tensor, k: &Tensor, v: &Tensor) -> Result<Tensor> { let q = self.q_proj.forward(&q.contiguous()?)?; let k = self.k_proj.forward(&k.contiguous()?)?; let v = self.v_proj.forward(&v.contiguous()?)?; let q = self.separate_heads(&q)?; let k = self.separate_heads(&k)?; let v = self.separate_heads(&v)?; let (_, _, _, c_per_head) = q.dims4()?; let attn = (q.matmul(&k.t()?)? / (c_per_head as f64).sqrt())?; let attn = candle_nn::ops::softmax_last_dim(&attn)?; let out = attn.matmul(&v)?; self.recombine_heads(&out)?.apply(&self.out_proj) } } #[derive(Debug)] struct TwoWayAttentionBlock { self_attn: Attention, norm1: LayerNorm, cross_attn_token_to_image: Attention, norm2: LayerNorm, mlp: super::MlpBlock, norm3: LayerNorm, norm4: LayerNorm, cross_attn_image_to_token: Attention, skip_first_layer_pe: bool, } impl TwoWayAttentionBlock { fn new( embedding_dim: usize, num_heads: usize, mlp_dim: usize, skip_first_layer_pe: bool, vb: VarBuilder, ) -> Result<Self> { let norm1 = layer_norm(embedding_dim, 1e-5, vb.pp("norm1"))?; let norm2 = layer_norm(embedding_dim, 1e-5, vb.pp("norm2"))?; let norm3 = layer_norm(embedding_dim, 1e-5, vb.pp("norm3"))?; let norm4 = layer_norm(embedding_dim, 1e-5, vb.pp("norm4"))?; let self_attn = Attention::new(embedding_dim, num_heads, 1, vb.pp("self_attn"))?; let cross_attn_token_to_image = Attention::new( embedding_dim, num_heads, 2, vb.pp("cross_attn_token_to_image"), )?; let cross_attn_image_to_token = Attention::new( embedding_dim, num_heads, 2, vb.pp("cross_attn_image_to_token"), )?; let mlp = super::MlpBlock::new( embedding_dim, mlp_dim, candle_nn::Activation::Relu, vb.pp("mlp"), )?; Ok(Self { self_attn, norm1, cross_attn_image_to_token, norm2, mlp, norm3, norm4, cross_attn_token_to_image, skip_first_layer_pe, }) } fn forward( &self, queries: &Tensor, keys: &Tensor, query_pe: &Tensor, key_pe: &Tensor, ) -> Result<(Tensor, Tensor)> { // Self attention block let queries = if self.skip_first_layer_pe { self.self_attn.forward(queries, queries, queries)? } else { let q = (queries + query_pe)?; let attn_out = self.self_attn.forward(&q, &q, queries)?; (queries + attn_out)? 
}; let queries = self.norm1.forward(&queries)?; // Cross attention block, tokens attending to image embedding let q = (&queries + query_pe)?; let k = (keys + key_pe)?; let attn_out = self.cross_attn_token_to_image.forward(&q, &k, keys)?; let queries = (&queries + attn_out)?; let queries = self.norm2.forward(&queries)?; // MLP block let mlp_out = self.mlp.forward(&queries); let queries = (queries + mlp_out)?; let queries = self.norm3.forward(&queries)?; // Cross attention block, image embedding attending to tokens let q = (&queries + query_pe)?; let k = (keys + key_pe)?; let attn_out = self.cross_attn_image_to_token.forward(&k, &q, &queries)?; let keys = (keys + attn_out)?; let keys = self.norm4.forward(&keys)?; Ok((queries, keys)) } } #[derive(Debug)] pub struct TwoWayTransformer { layers: Vec<TwoWayAttentionBlock>, final_attn_token_to_image: Attention, norm_final_attn: LayerNorm, } impl TwoWayTransformer { pub fn new( depth: usize, embedding_dim: usize, num_heads: usize, mlp_dim: usize, vb: VarBuilder, ) -> Result<Self> { let vb_l = vb.pp("layers"); let mut layers = Vec::with_capacity(depth); for i in 0..depth { let layer = TwoWayAttentionBlock::new(embedding_dim, num_heads, mlp_dim, i == 0, vb_l.pp(i))?; layers.push(layer) } let final_attn_token_to_image = Attention::new( embedding_dim, num_heads, 2, vb.pp("final_attn_token_to_image"), )?; let norm_final_attn = layer_norm(embedding_dim, 1e-5, vb.pp("norm_final_attn"))?; Ok(Self { layers, final_attn_token_to_image, norm_final_attn, }) } pub fn forward( &self, image_embedding: &Tensor, image_pe: &Tensor, point_embedding: &Tensor, ) -> Result<(Tensor, Tensor)> { let image_embedding = image_embedding.flatten_from(2)?.permute((0, 2, 1))?; let image_pe = image_pe.flatten_from(2)?.permute((0, 2, 1))?; let mut queries = point_embedding.clone(); let mut keys = image_embedding; for layer in self.layers.iter() { (queries, keys) = layer.forward(&queries, &keys, point_embedding, &image_pe)? } let q = (&queries + point_embedding)?; let k = (&keys + image_pe)?; let attn_out = self.final_attn_token_to_image.forward(&q, &k, &keys)?; let queries = (queries + attn_out)?.apply(&self.norm_final_attn)?; Ok((queries, keys)) } }
candle/candle-transformers/src/models/segment_anything/transformer.rs/0
{ "file_path": "candle/candle-transformers/src/models/segment_anything/transformer.rs", "repo_id": "candle", "token_count": 3597 }
//! Würstchen Efficient Diffusion Model
//!
//! Würstchen is an efficient diffusion model architecture for generating images using
//! a two-stage approach with a small decoder and prior network.
//!
//! - 💻 [GH Link](https://github.com/dome272/Wuerstchen)
//! - 🤗 [HF Link](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py)
//! - 📝 [Paper](https://openreview.net/pdf?id=gU58AyJlYz)
//!
//! ## Example
//!
//! <div align=center>
//! <img src="https://github.com/huggingface/candle/raw/main/candle-examples/examples/wuerstchen/assets/cat.jpg" alt="" width=320>
//! <p>"Anthropomorphic cat dressed as a fire fighter"</p>
//! </div>

pub mod attention_processor;
pub mod common;
pub mod ddpm;
pub mod diffnext;
pub mod paella_vq;
pub mod prior;
candle/candle-transformers/src/models/wuerstchen/mod.rs/0
{ "file_path": "candle/candle-transformers/src/models/wuerstchen/mod.rs", "repo_id": "candle", "token_count": 302 }
use candle::{DType, Device, Tensor}; use candle_nn::VarBuilder; use candle_transformers::{ generation::LogitsProcessor, models::{moondream, quantized_moondream}, }; use candle_wasm_example_moondream::console_log; use js_sys::Date; use serde::{Deserialize, Serialize}; use tokenizers::Tokenizer; use wasm_bindgen::prelude::*; enum SelectedModel { Moondream(moondream::Model), Quantized(quantized_moondream::Model), } #[wasm_bindgen] pub struct Model { model: SelectedModel, tokenizer: Tokenizer, logits_processor: LogitsProcessor, tokens: Vec<u32>, repeat_penalty: f32, repeat_last_n: usize, index: usize, bos_token: Option<Tensor>, image_embeddings: Option<Tensor>, } #[derive(Serialize, Deserialize)] struct Output { token: String, token_id: u32, } #[derive(Serialize, Deserialize)] struct InitInput { prompt: String, seed: u64, temp: f64, top_p: f64, repeat_penalty: f32, repeat_last_n: usize, verbose_prompt: bool, } #[wasm_bindgen] impl Model { #[wasm_bindgen(constructor)] pub fn load(weights: Vec<u8>, tokenizer: Vec<u8>, quantized: bool) -> Result<Model, JsError> { console_error_panic_hook::set_once(); console_log!("loading model"); let device = Device::Cpu; let config = moondream::Config::v2(); console_log!("config loaded in {:?}", Date::now()); let tokenizer = Tokenizer::from_bytes(&tokenizer).map_err(|m| JsError::new(&m.to_string()))?; let start = Date::now(); console_log!("weights len: {:?}", weights.len()); let model = if quantized { let vb = candle_transformers::quantized_var_builder::VarBuilder::from_gguf_buffer( &weights, &device, )?; console_log!("weights loaded"); let model = quantized_moondream::Model::new(&config, vb)?; SelectedModel::Quantized(model) } else { let device = &Device::Cpu; let vb = VarBuilder::from_buffered_safetensors(weights, DType::F32, device)?; let model = moondream::Model::new(&config, vb)?; SelectedModel::Moondream(model) }; console_log!("model loaded in {:?}s", (Date::now() - start) / 1000.); let logits_processor = LogitsProcessor::new(299792458, None, None); Ok(Self { model, tokenizer, tokens: vec![], logits_processor, repeat_penalty: 1., repeat_last_n: 64, bos_token: None, image_embeddings: None, index: 0, }) } pub fn set_image_embeddings(&mut self, image: Vec<u8>) -> Result<(), JsError> { let device = Device::Cpu; console_log!("loading image as tensor"); let start = Date::now(); let image: Tensor = self.load_image(image)?.to_device(&device)?; console_log!("image loaded in {:?}s", (Date::now() - start) / 1000.); let start = Date::now(); let image_embeds = &image.unsqueeze(0)?; let image_embeds = match &self.model { SelectedModel::Moondream(ref m) => image_embeds.apply(m.vision_encoder())?, SelectedModel::Quantized(ref m) => image_embeds.apply(m.vision_encoder())?, }; console_log!( "loaded and encoded the image {image:?} in {:?}", (Date::now() - start) / 1000. ); self.image_embeddings = Some(image_embeds); Ok(()) } #[wasm_bindgen] pub fn init_with_image_prompt(&mut self, input: JsValue) -> Result<JsValue, JsError> { let InitInput { prompt, seed, temp, top_p, repeat_penalty, repeat_last_n, verbose_prompt, } = serde_wasm_bindgen::from_value(input).map_err(|m| JsError::new(&m.to_string()))?; let device = Device::Cpu; let prompt = format!("\n\nQuestion: {0}\n\nAnswer:", prompt); match &mut self.model { SelectedModel::Moondream(m) => m.text_model.clear_kv_cache(), SelectedModel::Quantized(m) => m.text_model.clear_kv_cache(), }; let temp = if temp <= 0. { None } else { Some(temp) }; let top_p = if top_p <= 0. || top_p >= 1. 
{ None } else { Some(top_p) }; self.logits_processor = LogitsProcessor::new(seed, temp, top_p); self.repeat_penalty = repeat_penalty; self.repeat_last_n = repeat_last_n; self.tokens.clear(); self.index = 0; // Moondream tokenizer bos_token is "<|endoftext|>" // https://huggingface.co/vikhyatk/moondream2/blob/main/special_tokens_map.json let special_token = match self.tokenizer.get_vocab(true).get("<|endoftext|>") { Some(token) => *token, None => return Err(JsError::new("BOS token not found in the tokenizer.")), }; self.bos_token = Some(Tensor::new(&[special_token], &device)?.unsqueeze(0)?); let tokens = self .tokenizer .encode(prompt, true) .map_err(|m| JsError::new(&m.to_string()))?; if tokens.is_empty() { return Err(JsError::new( "Empty prompts are not supported in the Moondream model.", )); } if verbose_prompt { for (token, id) in tokens.get_tokens().iter().zip(tokens.get_ids().iter()) { let token = token.replace('▁', " ").replace("<0x0A>", "\n"); println!("{id:7} -> '{token}'"); } } let tokens = tokens.get_ids().to_vec(); let text = match self.process(&tokens) { Ok(text) => text, Err(_e) => { console_log!("error decoding token"); Output { token: "".to_string(), token_id: 0, } } }; Ok(serde_wasm_bindgen::to_value(&text)?) } #[wasm_bindgen] pub fn next_token(&mut self) -> Result<JsValue, JsError> { let last_token = *self.tokens.last().unwrap(); let text = match self.process(&[last_token]) { Ok(text) => text, Err(_e) => { console_log!("error decoding token"); Output { token: "".to_string(), token_id: 0, } } }; Ok(serde_wasm_bindgen::to_value(&text)?) } } impl Model { fn load_image(&self, image: Vec<u8>) -> Result<Tensor, JsError> { let img = image::ImageReader::new(std::io::Cursor::new(image)) .with_guessed_format()? .decode() .map_err(|e| JsError::new(&e.to_string()))? .resize_to_fill(378, 378, image::imageops::FilterType::Triangle); // Adjusted to 378x378 let img = img.to_rgb8(); let data = img.into_raw(); let data = Tensor::from_vec(data, (378, 378, 3), &Device::Cpu)?.permute((2, 0, 1))?; let mean = Tensor::new(&[0.5f32, 0.5, 0.5], &Device::Cpu)?.reshape((3, 1, 1))?; let std = Tensor::new(&[0.5f32, 0.5, 0.5], &Device::Cpu)?.reshape((3, 1, 1))?; (data.to_dtype(candle::DType::F32)? / 255.)? .broadcast_sub(&mean)? .broadcast_div(&std) .map_err(|e| JsError::new(&e.to_string())) } } impl Model { fn process(&mut self, tokens: &[u32]) -> Result<Output, JsError> { let image_embeddings = match &self.image_embeddings { Some(embeddings) => embeddings, None => return Err(JsError::new("Image embeddings are not set.")), }; let bos_token = match &self.bos_token { Some(token) => token, None => return Err(JsError::new("BOS token is not set.")), }; let device = Device::Cpu; let context_size = if self.index > 0 { 1 } else { tokens.len() }; let ctxt = &tokens[tokens.len().saturating_sub(context_size)..]; let input = Tensor::new(ctxt, &device)?.unsqueeze(0)?; let logits = if self.index > 0 { match self.model { SelectedModel::Moondream(ref mut model) => model.text_model.forward(&input)?, SelectedModel::Quantized(ref mut model) => model.text_model.forward(&input)?, } } else { match self.model { SelectedModel::Moondream(ref mut model) => { model .text_model .forward_with_img(bos_token, &input, image_embeddings)? } SelectedModel::Quantized(ref mut model) => { model .text_model .forward_with_img(bos_token, &input, image_embeddings)? } } }; let logits = logits.squeeze(0)?.to_dtype(DType::F32)?; let logits = if self.repeat_penalty == 1. 
{ logits } else { let start_at = tokens.len().saturating_sub(self.repeat_last_n); candle_transformers::utils::apply_repeat_penalty( &logits, self.repeat_penalty, &tokens[start_at..], )? }; let next_token = self.logits_processor.sample(&logits)?; self.tokens.push(next_token); let token = match self.tokenizer.decode(&[next_token], true) { Ok(token) => token, Err(e) => { console_log!("error decoding token: {:?}", e); "".to_string() } }; self.index += 1; Ok(Output { token, token_id: next_token, }) } } fn main() { console_error_panic_hook::set_once(); }
candle/candle-wasm-examples/moondream/src/bin/m.rs/0
{ "file_path": "candle/candle-wasm-examples/moondream/src/bin/m.rs", "repo_id": "candle", "token_count": 4976 }
use crate::console_log; use crate::worker::{ModelData, Segment, Worker, WorkerInput, WorkerOutput}; use js_sys::Date; use wasm_bindgen::prelude::*; use wasm_bindgen_futures::JsFuture; use yew::{html, Component, Context, Html}; use yew_agent::{Bridge, Bridged}; const SAMPLE_NAMES: [&str; 6] = [ "audios/samples_jfk.wav", "audios/samples_a13.wav", "audios/samples_gb0.wav", "audios/samples_gb1.wav", "audios/samples_hp0.wav", "audios/samples_mm0.wav", ]; async fn fetch_url(url: &str) -> Result<Vec<u8>, JsValue> { use web_sys::{Request, RequestCache, RequestInit, RequestMode, Response}; let window = web_sys::window().ok_or("window")?; let opts = RequestInit::new(); opts.set_method("GET"); opts.set_mode(RequestMode::Cors); opts.set_cache(RequestCache::NoCache); let request = Request::new_with_str_and_init(url, &opts)?; let resp_value = JsFuture::from(window.fetch_with_request(&request)).await?; // `resp_value` is a `Response` object. assert!(resp_value.is_instance_of::<Response>()); let resp: Response = resp_value.dyn_into()?; let data = JsFuture::from(resp.blob()?).await?; let blob = web_sys::Blob::from(data); let array_buffer = JsFuture::from(blob.array_buffer()).await?; let data = js_sys::Uint8Array::new(&array_buffer).to_vec(); Ok(data) } pub enum Msg { Run(usize), UpdateStatus(String), SetDecoder(ModelData), WorkerIn(WorkerInput), WorkerOut(Result<WorkerOutput, String>), } pub struct CurrentDecode { start_time: Option<f64>, } pub struct App { status: String, loaded: bool, segments: Vec<Segment>, current_decode: Option<CurrentDecode>, worker: Box<dyn Bridge<Worker>>, } async fn model_data_load() -> Result<ModelData, JsValue> { let quantized = false; let is_multilingual = false; let (tokenizer, mel_filters, weights, config) = if quantized { console_log!("loading quantized weights"); let tokenizer = fetch_url("quantized/tokenizer-tiny-en.json").await?; let mel_filters = fetch_url("mel_filters.safetensors").await?; let weights = fetch_url("quantized/model-tiny-en-q80.gguf").await?; let config = fetch_url("quantized/config-tiny-en.json").await?; (tokenizer, mel_filters, weights, config) } else { console_log!("loading float weights"); if is_multilingual { let mel_filters = fetch_url("mel_filters.safetensors").await?; let tokenizer = fetch_url("whisper-tiny/tokenizer.json").await?; let weights = fetch_url("whisper-tiny/model.safetensors").await?; let config = fetch_url("whisper-tiny/config.json").await?; (tokenizer, mel_filters, weights, config) } else { let mel_filters = fetch_url("mel_filters.safetensors").await?; let tokenizer = fetch_url("whisper-tiny.en/tokenizer.json").await?; let weights = fetch_url("whisper-tiny.en/model.safetensors").await?; let config = fetch_url("whisper-tiny.en/config.json").await?; (tokenizer, mel_filters, weights, config) } }; let timestamps = true; let _task = Some("transcribe".to_string()); console_log!("{}", weights.len()); Ok(ModelData { tokenizer, mel_filters, weights, config, quantized, timestamps, task: None, is_multilingual, language: None, }) } fn performance_now() -> Option<f64> { let window = web_sys::window()?; let performance = window.performance()?; Some(performance.now() / 1000.) 
} impl Component for App { type Message = Msg; type Properties = (); fn create(ctx: &Context<Self>) -> Self { let status = "loading weights".to_string(); let cb = { let link = ctx.link().clone(); move |e| link.send_message(Self::Message::WorkerOut(e)) }; let worker = Worker::bridge(std::rc::Rc::new(cb)); Self { status, segments: vec![], current_decode: None, worker, loaded: false, } } fn rendered(&mut self, ctx: &Context<Self>, first_render: bool) { if first_render { ctx.link().send_future(async { match model_data_load().await { Err(err) => { let status = format!("{err:?}"); Msg::UpdateStatus(status) } Ok(model_data) => Msg::SetDecoder(model_data), } }); } } fn update(&mut self, ctx: &Context<Self>, msg: Self::Message) -> bool { match msg { Msg::SetDecoder(md) => { self.status = "weights loaded successfully!".to_string(); self.loaded = true; console_log!("loaded weights"); self.worker.send(WorkerInput::ModelData(md)); true } Msg::Run(sample_index) => { let sample = SAMPLE_NAMES[sample_index]; if self.current_decode.is_some() { self.status = "already decoding some sample at the moment".to_string() } else { let start_time = performance_now(); self.current_decode = Some(CurrentDecode { start_time }); self.status = format!("decoding {sample}"); self.segments.clear(); ctx.link().send_future(async move { match fetch_url(sample).await { Err(err) => { let output = Err(format!("decoding error: {err:?}")); // Mimic a worker output to so as to release current_decode Msg::WorkerOut(output) } Ok(wav_bytes) => Msg::WorkerIn(WorkerInput::DecodeTask { wav_bytes }), } }) } // true } Msg::WorkerOut(output) => { let dt = self.current_decode.as_ref().and_then(|current_decode| { current_decode.start_time.and_then(|start_time| { performance_now().map(|stop_time| stop_time - start_time) }) }); self.current_decode = None; match output { Ok(WorkerOutput::WeightsLoaded) => self.status = "weights loaded!".to_string(), Ok(WorkerOutput::Decoded(segments)) => { self.status = match dt { None => "decoding succeeded!".to_string(), Some(dt) => format!("decoding succeeded in {:.2}s", dt), }; self.segments = segments; } Err(err) => { self.status = format!("decoding error {err:?}"); } } true } Msg::WorkerIn(inp) => { self.worker.send(inp); true } Msg::UpdateStatus(status) => { self.status = status; true } } } fn view(&self, ctx: &Context<Self>) -> Html { html! { <div> <table> <thead> <tr> <th>{"Sample"}</th> <th></th> <th></th> </tr> </thead> <tbody> { SAMPLE_NAMES.iter().enumerate().map(|(i, name)| { html! { <tr> <th>{name}</th> <th><audio controls=true src={format!("./{name}")}></audio></th> { if self.loaded { html!(<th><button class="button" onclick={ctx.link().callback(move |_| Msg::Run(i))}> { "run" }</button></th>) }else{html!()} } </tr> } }).collect::<Html>() } </tbody> </table> <h2> {&self.status} </h2> { if !self.loaded{ html! { <progress id="progress-bar" aria-label="loading weights…"></progress> } } else if self.current_decode.is_some() { html! { <progress id="progress-bar" aria-label="decoding…"></progress> } } else { html!{ <blockquote> <p> { self.segments.iter().map(|segment| { html! { <> <i> { format!("{:.2}s-{:.2}s: (avg-logprob: {:.4}, no-speech-prob: {:.4})", segment.start, segment.start + segment.duration, segment.dr.avg_logprob, segment.dr.no_speech_prob, ) } </i> <br/ > {&segment.dr.text} <br/ > </> } }).collect::<Html>() } </p> </blockquote> } } } // Display the current date and time the page was rendered <p class="footer"> { "Rendered: " } { String::from(Date::new_0().to_string()) } </p> </div> } } }
candle/candle-wasm-examples/whisper/src/app.rs/0
{ "file_path": "candle/candle-wasm-examples/whisper/src/app.rs", "repo_id": "candle", "token_count": 5669 }
use candle_wasm_example_yolo::coco_classes; use candle_wasm_example_yolo::model::Bbox; use candle_wasm_example_yolo::worker::Model as M; use candle_wasm_example_yolo::worker::ModelPose as P; use wasm_bindgen::prelude::*; #[wasm_bindgen] pub struct Model { inner: M, } #[wasm_bindgen] impl Model { #[wasm_bindgen(constructor)] pub fn new(data: Vec<u8>, model_size: &str) -> Result<Model, JsError> { let inner = M::load_(data, model_size)?; Ok(Self { inner }) } #[wasm_bindgen] pub fn run( &self, image: Vec<u8>, conf_threshold: f32, iou_threshold: f32, ) -> Result<String, JsError> { let bboxes = self.inner.run(image, conf_threshold, iou_threshold)?; let mut detections: Vec<(String, Bbox)> = vec![]; for (class_index, bboxes_for_class) in bboxes.into_iter().enumerate() { for b in bboxes_for_class.into_iter() { detections.push((coco_classes::NAMES[class_index].to_string(), b)); } } let json = serde_json::to_string(&detections)?; Ok(json) } } #[wasm_bindgen] pub struct ModelPose { inner: P, } #[wasm_bindgen] impl ModelPose { #[wasm_bindgen(constructor)] pub fn new(data: Vec<u8>, model_size: &str) -> Result<ModelPose, JsError> { let inner = P::load_(data, model_size)?; Ok(Self { inner }) } #[wasm_bindgen] pub fn run( &self, image: Vec<u8>, conf_threshold: f32, iou_threshold: f32, ) -> Result<String, JsError> { let bboxes = self.inner.run(image, conf_threshold, iou_threshold)?; let json = serde_json::to_string(&bboxes)?; Ok(json) } } fn main() {}
candle/candle-wasm-examples/yolo/src/bin/m.rs/0
{ "file_path": "candle/candle-wasm-examples/yolo/src/bin/m.rs", "repo_id": "candle", "token_count": 840 }
# Use .env.local to change these variables # DO NOT EDIT THIS FILE WITH SENSITIVE DATA ### MongoDB ### MONGODB_URL=#your mongodb URL here, use chat-ui-db image if you don't want to set this MONGODB_DB_NAME=chat-ui MONGODB_DIRECT_CONNECTION=false ### Endpoints config ### HF_API_ROOT=https://api-inference.huggingface.co/models # HF_TOKEN is used for a lot of things, not only for inference but also fetching tokenizers, etc. # We recommend using an HF_TOKEN even if you use a local endpoint. HF_TOKEN= #get it from https://huggingface.co/settings/token # API Keys for providers, you will need to specify models in the MODELS section but these keys can be kept secret OPENAI_API_KEY=#your openai api key here ANTHROPIC_API_KEY=#your anthropic api key here CLOUDFLARE_ACCOUNT_ID=#your cloudflare account id here CLOUDFLARE_API_TOKEN=#your cloudflare api token here COHERE_API_TOKEN=#your cohere api token here GOOGLE_GENAI_API_KEY=#your google genai api token here ### Models ### ## Models can support many different endpoints, check the documentation for more details MODELS=`[ { "name": "NousResearch/Hermes-3-Llama-3.1-8B", "description": "Nous Research's latest Hermes 3 release in 8B size.", "promptExamples": [ { "title": "Write an email from bullet list", "prompt": "As a restaurant owner, write a professional email to the supplier to get these products every week: \n\n- Wine (x10)\n- Eggs (x24)\n- Bread (x12)" }, { "title": "Code a snake game", "prompt": "Code a basic snake game in python, give explanations for each step." }, { "title": "Assist in a task", "prompt": "How do I make a delicious lemon cheesecake?" } ] } ]` ## Text Embedding Models used for websearch # Default is a model that runs locally on CPU. TEXT_EMBEDDING_MODELS = `[ { "name": "Xenova/gte-small", "displayName": "Xenova/gte-small", "description": "Local embedding model running on the server.", "chunkCharLength": 512, "endpoints": [ { "type": "transformersjs" } ] } ]` ## Removed models, useful for migrating conversations # { name: string, displayName?: string, id?: string, transferTo?: string }` OLD_MODELS=`[]` ## Task model # name of the model used for tasks such as summarizing title, creating query, etc. # if not set, the first model in MODELS will be used TASK_MODEL= ### Authentication ### # Parameters to enable open id login OPENID_CONFIG= MESSAGES_BEFORE_LOGIN=# how many messages a user can send in a conversation before having to login. set to 0 to force login right away # if it's defined, only these emails will be allowed to use login ALLOWED_USER_EMAILS=`[]` # If it's defined, users with emails matching these domains will also be allowed to use login ALLOWED_USER_DOMAINS=`[]` # valid alternative redirect URLs for OAuth, used for HuggingChat apps ALTERNATIVE_REDIRECT_URLS=`[]` ### Cookies # name of the cookie used to store the session COOKIE_NAME=hf-chat # specify secure behaviour for cookies COOKIE_SAMESITE=# can be "lax", "strict", "none" or left empty COOKIE_SECURE=# set to true to only allow cookies over https ### Websearch ### ## API Keys used to activate search with web functionality. websearch is disabled if none are defined. 
choose one of the following: YDC_API_KEY=#your docs.you.com api key here SERPER_API_KEY=#your serper.dev api key here SERPAPI_KEY=#your serpapi key here SERPSTACK_API_KEY=#your serpstack api key here SEARCHAPI_KEY=#your searchapi api key here USE_LOCAL_WEBSEARCH=#set to true to parse google results yourself, overrides other API keys SEARXNG_QUERY_URL=# where '<query>' will be replaced with query keywords see https://docs.searxng.org/dev/search_api.html eg https://searxng.yourdomain.com/search?q=<query>&engines=duckduckgo,google&format=json BING_SUBSCRIPTION_KEY=#your key ## Websearch configuration PLAYWRIGHT_ADBLOCKER=true WEBSEARCH_ALLOWLIST=`[]` # if it's defined, allow websites from only this list. WEBSEARCH_BLOCKLIST=`[]` # if it's defined, block websites from this list. WEBSEARCH_JAVASCRIPT=true # CPU usage reduces by 60% on average by disabling javascript. Enable to improve website compatibility WEBSEARCH_TIMEOUT = 3500 # in milliseconds, determines how long to wait to load a page before timing out ENABLE_LOCAL_FETCH=false #set to true to allow fetches on the local network. /!\ Only enable this if you have the proper firewall rules to prevent SSRF attacks and understand the implications. ## Public app configuration ## PUBLIC_APP_GUEST_MESSAGE=# a message to the guest user. If not set, no message will be shown. Only used if you have authentication enabled. PUBLIC_APP_NAME=ChatUI # name used as title throughout the app PUBLIC_APP_ASSETS=chatui # used to find logos & favicons in static/$PUBLIC_APP_ASSETS PUBLIC_APP_DESCRIPTION=# description used throughout the app PUBLIC_APP_DATA_SHARING=# Set to 1 to enable an option in the user settings to share conversations with model authors PUBLIC_APP_DISCLAIMER=# Set to 1 to show a disclaimer on login page PUBLIC_APP_DISCLAIMER_MESSAGE=# Message to show on the login page PUBLIC_ANNOUNCEMENT_BANNERS=`[ { "title": "chat-ui is now open source!", "linkTitle": "check it out", "linkHref": "https://github.com/huggingface/chat-ui" } ]` PUBLIC_SMOOTH_UPDATES=false # set to true to enable smoothing of messages client-side, can be CPU intensive PUBLIC_ORIGIN=#https://huggingface.co PUBLIC_SHARE_PREFIX=#https://hf.co/chat # mostly huggingchat specific PUBLIC_GOOGLE_ANALYTICS_ID=#G-XXXXXXXX / Leave empty to disable PUBLIC_PLAUSIBLE_SCRIPT_URL=#/js/script.js / Leave empty to disable PUBLIC_APPLE_APP_ID=#1234567890 / Leave empty to disable ### Feature Flags ### LLM_SUMMARIZATION=true # generate conversation titles with LLMs ENABLE_ASSISTANTS=false #set to true to enable assistants feature ENABLE_ASSISTANTS_RAG=false # /!\ This will let users specify arbitrary URLs that the server will then request. Make sure you have the proper firewall rules in place. 
REQUIRE_FEATURED_ASSISTANTS=false # require featured assistants to show in the list COMMUNITY_TOOLS=false # set to true to enable community tools EXPOSE_API=true # make the /api routes available ALLOW_IFRAME=true # Allow the app to be embedded in an iframe ### Tools ### # Check out public config in `chart/env/prod.yaml` for more details TOOLS=`[]` ### Rate limits ### # See `src/lib/server/usageLimits.ts` # { # conversations: number, # how many conversations # messages: number, # how many messages in a conversation # assistants: number, # how many assistants # messageLength: number, # how long can a message be before we cut it off # messagesPerMinute: number, # how many messages per minute # tools: number # how many tools # } USAGE_LIMITS=`{}` ### HuggingFace specific ### # Let user authenticate with their HF token in the /api routes. This is only useful if you have OAuth configured with huggingface. USE_HF_TOKEN_IN_API=false ## Feature flag & admin settings # Used for setting early access & admin flags to users HF_ORG_ADMIN= HF_ORG_EARLY_ACCESS= WEBHOOK_URL_REPORT_ASSISTANT=#provide slack webhook url to get notified for reports/feature requests ### Metrics ### METRICS_ENABLED=false METRICS_PORT=5565 LOG_LEVEL=info ### Parquet export ### # Not in use anymore but useful to export conversations to a parquet file as a HuggingFace dataset PARQUET_EXPORT_DATASET= PARQUET_EXPORT_HF_TOKEN= ADMIN_API_SECRET=# secret to admin API calls, like computing usage stats or exporting parquet data ### Docker build variables ### # These values cannot be updated at runtime # They need to be passed when building the docker image # See https://github.com/huggingface/chat-ui/main/.github/workflows/deploy-prod.yml#L44-L47 APP_BASE="" # base path of the app, e.g. /chat, left blank as default PUBLIC_APP_COLOR=blue # can be any of tailwind colors: https://tailwindcss.com/docs/customizing-colors#default-color-palette ### Body size limit for SvelteKit https://svelte.dev/docs/kit/adapter-node#Environment-variables-BODY_SIZE_LIMIT BODY_SIZE_LIMIT=15728640 PUBLIC_COMMIT_SHA= ### LEGACY parameters HF_ACCESS_TOKEN=#LEGACY! Use HF_TOKEN instead ALLOW_INSECURE_COOKIES=false # LEGACY! Use COOKIE_SECURE and COOKIE_SAMESITE instead PARQUET_EXPORT_SECRET=#DEPRECATED, use ADMIN_API_SECRET instead RATE_LIMIT= # /!\ DEPRECATED definition of messages per minute. Use USAGE_LIMITS.messagesPerMinute instead OPENID_CLIENT_ID= OPENID_CLIENT_SECRET= OPENID_SCOPES="openid profile" # Add "email" for some providers like Google that do not provide preferred_username OPENID_NAME_CLAIM="name" # Change to "username" for some providers that do not provide name OPENID_PROVIDER_URL=https://huggingface.co # for Google, use https://accounts.google.com OPENID_TOLERANCE= OPENID_RESOURCE=
chat-ui/.env/0
{ "file_path": "chat-ui/.env", "repo_id": "chat-ui", "token_count": 2884 }
{{- if $.Values.networkPolicy.enabled }}
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: {{ include "name" . }}
  namespace: {{ .Release.Namespace }}
spec:
  egress:
    - ports:
        - port: 53
          protocol: UDP
      to:
        - namespaceSelector:
            matchLabels:
              kubernetes.io/metadata.name: kube-system
          podSelector:
            matchLabels:
              k8s-app: kube-dns
    - to:
        {{- range $ip := .Values.networkPolicy.allowedBlocks }}
        - ipBlock:
            cidr: {{ $ip | quote }}
        {{- end }}
    - to:
        - ipBlock:
            cidr: 0.0.0.0/0
            except:
              - 10.0.0.0/8
              - 172.16.0.0/12
              - 192.168.0.0/16
              - 169.254.169.254/32
  podSelector:
    matchLabels: {{ include "labels.standard" . | nindent 6 }}
  policyTypes:
    - Egress
{{- end }}
chat-ui/chart/templates/network-policy.yaml/0
{ "file_path": "chat-ui/chart/templates/network-policy.yaml", "repo_id": "chat-ui", "token_count": 494 }
# LangServe

| Feature                     | Available |
| --------------------------- | --------- |
| [Tools](../tools)           | No        |
| [Multimodal](../multimodal) | No        |

LangChain applications that are deployed using LangServe can be called with the following config:

```ini
MODELS=`[
  {
    "name": "summarization-chain",
    "displayName": "Summarization Chain",
    "endpoints" : [{
      "type": "langserve",
      "url" : "http://127.0.0.1:8100",
    }]
  }
]`
```
chat-ui/docs/source/configuration/models/providers/langserve.md/0
{ "file_path": "chat-ui/docs/source/configuration/models/providers/langserve.md", "repo_id": "chat-ui", "token_count": 220 }
<script lang="ts"> import type { readAndCompressImage } from "browser-image-resizer"; import type { Model } from "$lib/types/Model"; import type { Assistant } from "$lib/types/Assistant"; import { onMount } from "svelte"; import { applyAction, enhance } from "$app/forms"; import { page } from "$app/state"; import { base } from "$app/paths"; import CarbonPen from "~icons/carbon/pen"; import CarbonUpload from "~icons/carbon/upload"; import CarbonHelpFilled from "~icons/carbon/help"; import CarbonSettingsAdjust from "~icons/carbon/settings-adjust"; import CarbonTools from "~icons/carbon/tools"; import { useSettingsStore } from "$lib/stores/settings"; import { isHuggingChat } from "$lib/utils/isHuggingChat"; import IconInternet from "./icons/IconInternet.svelte"; import TokensCounter from "./TokensCounter.svelte"; import HoverTooltip from "./HoverTooltip.svelte"; import { findCurrentModel } from "$lib/utils/models"; import AssistantToolPicker from "./AssistantToolPicker.svelte"; type ActionData = { error: boolean; errors: { field: string | number; message: string; }[]; } | null; type AssistantFront = Omit<Assistant, "_id" | "createdById"> & { _id: string }; interface Props { form: ActionData; assistant?: AssistantFront | undefined; models?: Model[]; } let { form = $bindable(), assistant = undefined, models = [] }: Props = $props(); let files: FileList | null = $state(null); const settings = useSettingsStore(); let modelId = $state(""); let systemPrompt = $state(assistant?.preprompt ?? ""); let dynamicPrompt = $state(assistant?.dynamicPrompt ?? false); let showModelSettings = $state(Object.values(assistant?.generateSettings ?? {}).some((v) => !!v)); let compress: typeof readAndCompressImage | null = $state(null); onMount(async () => { const module = await import("browser-image-resizer"); compress = module.readAndCompressImage; modelId = findCurrentModel(models, assistant ? assistant.modelId : $settings.activeModel).id; }); let inputMessage1 = $state(assistant?.exampleInputs[0] ?? ""); let inputMessage2 = $state(assistant?.exampleInputs[1] ?? ""); let inputMessage3 = $state(assistant?.exampleInputs[2] ?? ""); let inputMessage4 = $state(assistant?.exampleInputs[3] ?? ""); function resetErrors() { if (form) { form.errors = []; form.error = false; } } function onFilesChange(e: Event) { const inputEl = e.target as HTMLInputElement; if (inputEl.files?.length && inputEl.files[0].size > 0) { if (!inputEl.files[0].type.includes("image")) { inputEl.files = null; files = null; form = { error: true, errors: [{ field: "avatar", message: "Only images are allowed" }] }; return; } files = inputEl.files; resetErrors(); deleteExistingAvatar = false; } } function getError(field: string, returnForm: ActionData) { return returnForm?.errors.find((error) => error.field === field)?.message ?? ""; } let deleteExistingAvatar = $state(false); let loading = $state(false); let ragMode: false | "links" | "domains" | "all" = $state( assistant?.rag?.allowAllDomains ? "all" : (assistant?.rag?.allowedLinks?.length ?? 0 > 0) ? "links" : (assistant?.rag?.allowedDomains?.length ?? 0) > 0 ? "domains" : false ); let tools = $state(assistant?.tools ?? 
[]); const regex = /{{\s?(get|post|url|today)(=.*?)?\s?}}/g; let templateVariables = $derived([...systemPrompt.matchAll(regex)]); let selectedModel = $derived(models.find((m) => m.id === modelId)); </script> <form method="POST" class="relative flex h-full flex-col overflow-y-auto p-4 md:p-8" enctype="multipart/form-data" use:enhance={async ({ formData }) => { loading = true; if (files?.[0] && files[0].size > 0 && compress) { await compress(files[0], { maxWidth: 500, maxHeight: 500, quality: 1, }).then((resizedImage) => { formData.set("avatar", resizedImage); }); } if (deleteExistingAvatar === true) { if (assistant?.avatar) { // if there is an avatar we explicitly removei t formData.set("avatar", "null"); } else { // else we just remove it from the input formData.delete("avatar"); } } else { if (files === null) { formData.delete("avatar"); } } formData.delete("ragMode"); if (ragMode === false || !page.data.enableAssistantsRAG) { formData.set("ragAllowAll", "false"); formData.set("ragLinkList", ""); formData.set("ragDomainList", ""); } else if (ragMode === "all") { formData.set("ragAllowAll", "true"); formData.set("ragLinkList", ""); formData.set("ragDomainList", ""); } else if (ragMode === "links") { formData.set("ragAllowAll", "false"); formData.set("ragDomainList", ""); } else if (ragMode === "domains") { formData.set("ragAllowAll", "false"); formData.set("ragLinkList", ""); } formData.set("tools", tools.join(",")); return async ({ result }) => { loading = false; await applyAction(result); }; }} > {#if assistant} <h2 class="text-xl font-semibold"> Edit Assistant: {assistant?.name ?? "assistant"} </h2> <p class="mb-6 text-sm text-gray-500"> Modifying an existing assistant will propagate the changes to all users. </p> {:else} <h2 class="text-xl font-semibold">Create new assistant</h2> <p class="mb-6 text-sm text-gray-500"> Create and share your own AI Assistant. 
All assistants are <span class="rounded-full border px-2 py-0.5 leading-none">public</span > </p> {/if} <div class="grid h-full w-full flex-1 grid-cols-2 gap-6 text-sm max-sm:grid-cols-1"> <div class="col-span-1 flex flex-col gap-4"> <div> <div class="mb-1 block pb-2 text-sm font-semibold">Avatar</div> <input type="file" accept="image/*" name="avatar" id="avatar" class="hidden" onchange={onFilesChange} /> {#if (files && files[0]) || (assistant?.avatar && !deleteExistingAvatar)} <div class="group relative mx-auto h-12 w-12"> {#if files && files[0]} <img src={URL.createObjectURL(files[0])} alt="avatar" class="crop mx-auto h-12 w-12 cursor-pointer rounded-full object-cover" /> {:else if assistant?.avatar} <img src="{base}/settings/assistants/{assistant._id}/avatar.jpg?hash={assistant.avatar}" alt="avatar" class="crop mx-auto h-12 w-12 cursor-pointer rounded-full object-cover" /> {/if} <label for="avatar" class="invisible absolute bottom-0 h-12 w-12 rounded-full bg-black bg-opacity-50 p-1 group-hover:visible hover:visible" > <CarbonPen class="mx-auto my-auto h-full cursor-pointer text-center text-white" /> </label> </div> <div class="mx-auto w-max pt-1"> <button type="button" onclick={(e) => { e.preventDefault(); e.stopPropagation(); files = null; deleteExistingAvatar = true; }} class="mx-auto w-max text-center text-xs text-gray-600 hover:underline" > Delete </button> </div> {:else} <div class="mb-1 flex w-max flex-row gap-4"> <label for="avatar" class="btn flex h-8 rounded-lg border bg-white px-3 py-1 text-gray-500 shadow-sm transition-all hover:bg-gray-100" > <CarbonUpload class="mr-2 text-xs " /> Upload </label> </div> {/if} <p class="text-xs text-red-500">{getError("avatar", form)}</p> </div> <label> <div class="mb-1 font-semibold">Name</div> <input name="name" class="w-full rounded-lg border-2 border-gray-200 bg-gray-100 p-2" placeholder="Assistant Name" value={assistant?.name ?? ""} /> <p class="text-xs text-red-500">{getError("name", form)}</p> </label> <label> <div class="mb-1 font-semibold">Description</div> <textarea name="description" class="h-15 w-full rounded-lg border-2 border-gray-200 bg-gray-100 p-2" placeholder="It knows everything about python" value={assistant?.description ?? ""} ></textarea> <p class="text-xs text-red-500">{getError("description", form)}</p> </label> <label> <div class="mb-1 font-semibold">Model</div> <div class="flex gap-2"> <select name="modelId" class="w-full rounded-lg border-2 border-gray-200 bg-gray-100 p-2" bind:value={modelId} > {#each models.filter((model) => !model.unlisted) as model} <option value={model.id}>{model.displayName}</option> {/each} </select> <p class="text-xs text-red-500">{getError("modelId", form)}</p> <button type="button" class="flex aspect-square items-center gap-2 whitespace-nowrap rounded-lg border px-3 {showModelSettings ? 'border-blue-500/20 bg-blue-50 text-blue-600' : ''}" onclick={() => (showModelSettings = !showModelSettings)} ><CarbonSettingsAdjust class="text-xs" /></button > </div> <div class="mt-2 rounded-lg border border-blue-500/20 bg-blue-500/5 px-2 py-0.5" class:hidden={!showModelSettings} > <p class="text-xs text-red-500">{getError("inputMessage1", form)}</p> <div class="my-2 grid grid-cols-1 gap-2.5 sm:grid-cols-2 sm:grid-rows-2"> <label for="temperature" class="flex justify-between"> <span class="m-1 ml-0 flex items-center gap-1.5 whitespace-nowrap text-sm"> Temperature <HoverTooltip label="Temperature: Controls creativity, higher values allow more variety." 
> <CarbonHelpFilled class="inline text-xxs text-gray-500 group-hover/tooltip:text-blue-600" /> </HoverTooltip> </span> <input type="number" name="temperature" min="0.1" max="2" step="0.1" class="w-20 rounded-lg border-2 border-gray-200 bg-gray-100 px-2 py-1" placeholder={selectedModel?.parameters?.temperature?.toString() ?? "1"} value={assistant?.generateSettings?.temperature ?? ""} /> </label> <label for="top_p" class="flex justify-between"> <span class="m-1 ml-0 flex items-center gap-1.5 whitespace-nowrap text-sm"> Top P <HoverTooltip label="Top P: Sets word choice boundaries, lower values tighten focus." > <CarbonHelpFilled class="inline text-xxs text-gray-500 group-hover/tooltip:text-blue-600" /> </HoverTooltip> </span> <input type="number" name="top_p" class="w-20 rounded-lg border-2 border-gray-200 bg-gray-100 px-2 py-1" min="0.05" max="1" step="0.05" placeholder={selectedModel?.parameters?.top_p?.toString() ?? "1"} value={assistant?.generateSettings?.top_p ?? ""} /> </label> <label for="repetition_penalty" class="flex justify-between"> <span class="m-1 ml-0 flex items-center gap-1.5 whitespace-nowrap text-sm"> Repetition penalty <HoverTooltip label="Repetition penalty: Prevents reuse, higher values decrease repetition." > <CarbonHelpFilled class="inline text-xxs text-gray-500 group-hover/tooltip:text-blue-600" /> </HoverTooltip> </span> <input type="number" name="repetition_penalty" min="0.1" max="2" step="0.05" class="w-20 rounded-lg border-2 border-gray-200 bg-gray-100 px-2 py-1" placeholder={selectedModel?.parameters?.repetition_penalty?.toString() ?? "1.0"} value={assistant?.generateSettings?.repetition_penalty ?? ""} /> </label> <label for="top_k" class="flex justify-between"> <span class="m-1 ml-0 flex items-center gap-1.5 whitespace-nowrap text-sm"> Top K <HoverTooltip label="Top K: Restricts word options, lower values for predictability." > <CarbonHelpFilled class="inline text-xxs text-gray-500 group-hover/tooltip:text-blue-600" /> </HoverTooltip> </span> <input type="number" name="top_k" min="5" max="100" step="5" class="w-20 rounded-lg border-2 border-gray-200 bg-gray-100 px-2 py-1" placeholder={selectedModel?.parameters?.top_k?.toString() ?? "50"} value={assistant?.generateSettings?.top_k ?? ""} /> </label> </div> </div> </label> <label> <div class="mb-1 font-semibold">User start messages</div> <div class="grid gap-1.5 text-sm md:grid-cols-2"> <input name="exampleInput1" placeholder="Start Message 1" bind:value={inputMessage1} class="w-full rounded-lg border-2 border-gray-200 bg-gray-100 p-2" /> <input name="exampleInput2" placeholder="Start Message 2" bind:value={inputMessage2} class="w-full rounded-lg border-2 border-gray-200 bg-gray-100 p-2" /> <input name="exampleInput3" placeholder="Start Message 3" bind:value={inputMessage3} class="w-full rounded-lg border-2 border-gray-200 bg-gray-100 p-2" /> <input name="exampleInput4" placeholder="Start Message 4" bind:value={inputMessage4} class="w-full rounded-lg border-2 border-gray-200 bg-gray-100 p-2" /> </div> <p class="text-xs text-red-500">{getError("inputMessage1", form)}</p> </label> {#if selectedModel?.tools} <div> <span class="text-smd font-semibold" >Tools <CarbonTools class="inline text-xs text-purple-600" /> <span class="ml-1 rounded bg-gray-100 px-1 py-0.5 text-xxs font-normal text-gray-600" >Experimental</span > </span> <p class="text-xs text-gray-500"> Choose up to 3 community tools that will be used with this assistant. 
</p> </div> <AssistantToolPicker bind:toolIds={tools} /> {/if} {#if page.data.enableAssistantsRAG} <div class="flex flex-col flex-nowrap pb-4"> <span class="mt-2 text-smd font-semibold" >Internet access <IconInternet classNames="inline text-sm text-blue-600" /> {#if isHuggingChat} <a href="https://huggingface.co/spaces/huggingchat/chat-ui/discussions/385" target="_blank" class="ml-0.5 rounded bg-gray-100 px-1 py-0.5 text-xxs font-normal text-gray-700 underline decoration-gray-400" >Give feedback</a > {/if} </span> <label class="mt-1"> <input checked={!ragMode} onchange={() => (ragMode = false)} type="radio" name="ragMode" value={false} /> <span class="my-2 text-sm" class:font-semibold={!ragMode}> Default </span> {#if !ragMode} <span class="block text-xs text-gray-500"> Assistant will not use internet to do information retrieval and will respond faster. Recommended for most Assistants. </span> {/if} </label> <label class="mt-1"> <input checked={ragMode === "all"} onchange={() => (ragMode = "all")} type="radio" name="ragMode" value={"all"} /> <span class="my-2 text-sm" class:font-semibold={ragMode === "all"}> Web search </span> {#if ragMode === "all"} <span class="block text-xs text-gray-500"> Assistant will do a web search on each user request to find information. </span> {/if} </label> <label class="mt-1"> <input checked={ragMode === "domains"} onchange={() => (ragMode = "domains")} type="radio" name="ragMode" value={false} /> <span class="my-2 text-sm" class:font-semibold={ragMode === "domains"}> Domains search </span> </label> {#if ragMode === "domains"} <span class="mb-2 text-xs text-gray-500"> Specify domains and URLs that the application can search, separated by commas. </span> <input name="ragDomainList" class="w-full rounded-lg border-2 border-gray-200 bg-gray-100 p-2" placeholder="wikipedia.org,bbc.com" value={assistant?.rag?.allowedDomains?.join(",") ?? ""} /> <p class="text-xs text-red-500">{getError("ragDomainList", form)}</p> {/if} <label class="mt-1"> <input checked={ragMode === "links"} onchange={() => (ragMode = "links")} type="radio" name="ragMode" value={false} /> <span class="my-2 text-sm" class:font-semibold={ragMode === "links"}> Specific Links </span> </label> {#if ragMode === "links"} <span class="mb-2 text-xs text-gray-500"> Specify a maximum of 10 direct URLs that the Assistant will access. HTML & Plain Text only, separated by commas </span> <input name="ragLinkList" class="w-full rounded-lg border-2 border-gray-200 bg-gray-100 p-2" placeholder="https://raw.githubusercontent.com/huggingface/chat-ui/main/README.md" value={assistant?.rag?.allowedLinks.join(",") ?? ""} /> <p class="text-xs text-red-500">{getError("ragLinkList", form)}</p> {/if} </div> {/if} </div> <div class="relative col-span-1 flex h-full flex-col"> <div class="mb-1 flex justify-between text-sm"> <span class="block font-semibold"> Instructions (System Prompt) </span> {#if dynamicPrompt && templateVariables.length} <div class="relative"> <button type="button" class="peer rounded bg-blue-500/20 px-1 text-xs text-blue-600 focus:bg-blue-500/30 focus:text-blue-800 sm:text-sm" > {templateVariables.length} template variable{templateVariables.length > 1 ? "s" : ""} </button> <div class="invisible absolute right-0 top-6 z-10 rounded-lg border bg-white p-2 text-xs shadow-lg peer-focus:visible hover:visible sm:w-96" > Will perform a GET or POST request and inject the response into the prompt. Works better with plain text, csv or json content. 
{#each templateVariables as match} <div> <a href={match[1].toLowerCase() === "get" ? match[2] : "#"} target={match[1].toLowerCase() === "get" ? "_blank" : ""} class="text-gray-500 underline decoration-gray-300" > {match[1].toUpperCase()}: {match[2]} </a> </div> {/each} </div> </div> {/if} </div> <label class="pb-2 text-sm has-[:checked]:font-semibold"> <input type="checkbox" name="dynamicPrompt" bind:checked={dynamicPrompt} /> Dynamic Prompt <p class="mb-2 text-xs font-normal text-gray-500"> Allow the use of template variables {"{{get=https://example.com/path}}"} to insert dynamic content into your prompt by making GET requests to specified URLs on each inference. You can also send the user's message as the body of a POST request, using {"{{post=https://example.com/path}}"}. Use {"{{today}}"} to include the current date. </p> </label> <div class="relative mb-20 flex h-full flex-col gap-2"> <textarea name="preprompt" class="min-h-[8lh] flex-1 rounded-lg border-2 border-gray-200 bg-gray-100 p-2 text-sm" placeholder="You'll act as..." bind:value={systemPrompt} ></textarea> {#if modelId} {@const model = models.find((_model) => _model.id === modelId)} {#if model?.tokenizer && systemPrompt} <TokensCounter classNames="absolute bottom-4 right-4" prompt={systemPrompt} modelTokenizer={model.tokenizer} truncate={model?.parameters?.truncate} /> {/if} {/if} <p class="text-xs text-red-500">{getError("preprompt", form)}</p> </div> <div class="absolute bottom-6 flex w-full justify-end gap-2 md:right-0 md:w-fit"> <a href={assistant ? `${base}/settings/assistants/${assistant?._id}` : `${base}/settings`} class="flex items-center justify-center rounded-full bg-gray-200 px-5 py-2 font-semibold text-gray-600" > Cancel </a> <button type="submit" disabled={loading} aria-disabled={loading} class="flex items-center justify-center rounded-full bg-black px-8 py-2 font-semibold" class:bg-gray-200={loading} class:text-gray-600={loading} class:text-white={!loading} > {assistant ? "Save" : "Create"} </button> </div> </div> </div> </form>
chat-ui/src/lib/components/AssistantSettings.svelte/0
{ "file_path": "chat-ui/src/lib/components/AssistantSettings.svelte", "repo_id": "chat-ui", "token_count": 9229 }
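The form above funnels everything through formData; the sketch below shows one way the submitted fields could be validated server-side with zod. The schema shape, limits, and defaults are assumptions for illustration, not the repository's actual action code.

// Hypothetical validation for the assistant form (sketch only; field names mirror the inputs above).
import { z } from "zod";

const booleanString = z.enum(["true", "false"]).transform((v) => v === "true");

const assistantFormSchema = z.object({
	name: z.string().min(1),
	modelId: z.string().min(1),
	description: z.string().default(""),
	preprompt: z.string().default(""),
	exampleInput1: z.string().default(""),
	exampleInput2: z.string().default(""),
	exampleInput3: z.string().default(""),
	exampleInput4: z.string().default(""),
	ragAllowAll: booleanString.default("false"),
	ragLinkList: z.string().default(""), // comma-separated URLs set by the enhance handler
	ragDomainList: z.string().default(""), // comma-separated domains
	tools: z.string().default(""), // comma-separated tool ids
});

// Usage inside a hypothetical SvelteKit action:
// const parsed = assistantFormSchema.parse(Object.fromEntries(formData));
// const allowedLinks = parsed.ragLinkList.split(",").filter(Boolean);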
<script lang="ts"> import { page } from "$app/stores"; import { getHref } from "$lib/utils/getHref"; import PaginationArrow from "./PaginationArrow.svelte"; interface Props { classNames?: string; numItemsPerPage: number; numTotalItems: number; } let { classNames = "", numItemsPerPage, numTotalItems }: Props = $props(); const ELLIPSIS_IDX = -1 as const; function getPageIndexes(pageIdx: number, nTotalPages: number) { let pageIdxs: number[] = []; const NUM_EXTRA_BUTTONS = 2; // The number of page links to show on either side of the current page link. const minIdx = 0; const maxIdx = nTotalPages - 1; pageIdxs = [pageIdx]; // forward for (let i = 1; i < NUM_EXTRA_BUTTONS + 1; i++) { const newPageIdx = pageIdx + i; if (newPageIdx > maxIdx) { continue; } pageIdxs.push(newPageIdx); } if (maxIdx - pageIdxs[pageIdxs.length - 1] > 1) { pageIdxs.push(...[ELLIPSIS_IDX, maxIdx]); } else if (maxIdx - pageIdxs[pageIdxs.length - 1] === 1) { pageIdxs.push(maxIdx); } // backward for (let i = 1; i < NUM_EXTRA_BUTTONS + 1; i++) { const newPageIdx = pageIdx - i; if (newPageIdx < minIdx) { continue; } pageIdxs.unshift(newPageIdx); } if (pageIdxs[0] - minIdx > 1) { pageIdxs.unshift(...[minIdx, ELLIPSIS_IDX]); } else if (pageIdxs[0] - minIdx === 1) { pageIdxs.unshift(minIdx); } return pageIdxs; } let numTotalPages = $derived(Math.ceil(numTotalItems / numItemsPerPage)); let pageIndex = $derived(parseInt($page.url.searchParams.get("p") ?? "0")); let pageIndexes = $derived(getPageIndexes(pageIndex, numTotalPages)); </script> {#if numTotalPages > 1} <nav> <ul class="flex select-none items-center justify-between space-x-2 text-gray-700 dark:text-gray-300 sm:justify-center {classNames}" > <li> <PaginationArrow href={getHref($page.url, { newKeys: { p: (pageIndex - 1).toString() } })} direction="previous" isDisabled={pageIndex - 1 < 0} /> </li> {#each pageIndexes as pageIdx} <li class="hidden sm:block"> <a class=" rounded-lg px-2.5 py-1 {pageIndex === pageIdx ? 'bg-gray-50 font-semibold ring-1 ring-inset ring-gray-200 dark:bg-gray-800 dark:text-yellow-500 dark:ring-gray-700' : ''} " class:pointer-events-none={pageIdx === ELLIPSIS_IDX || pageIndex === pageIdx} href={getHref($page.url, { newKeys: { p: pageIdx.toString() } })} > {pageIdx === ELLIPSIS_IDX ? "..." : pageIdx + 1} </a> </li> {/each} <li> <PaginationArrow href={getHref($page.url, { newKeys: { p: (pageIndex + 1).toString() } })} direction="next" isDisabled={pageIndex + 1 >= numTotalPages} /> </li> </ul> </nav> {/if}
chat-ui/src/lib/components/Pagination.svelte/0
{ "file_path": "chat-ui/src/lib/components/Pagination.svelte", "repo_id": "chat-ui", "token_count": 1249 }
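For reference, here is what getPageIndexes yields for a couple of inputs, written as a vitest sketch. The helper is local to the component, so the import path is hypothetical and assumes the function gets extracted into its own module first.

import { describe, expect, it } from "vitest";
// Hypothetical module: getPageIndexes would need to be exported from a shared helper to be testable.
import { getPageIndexes } from "./getPageIndexes";

describe("getPageIndexes", () => {
	it("windows around the current page and marks gaps with -1 (rendered as '...')", () => {
		// Page index 5 of 20 pages: first page, gap, window of 2 on each side, gap, last page.
		expect(getPageIndexes(5, 20)).toEqual([0, -1, 3, 4, 5, 6, 7, -1, 19]);
		// At the start there is no gap before the window, so no leading ellipsis.
		expect(getPageIndexes(0, 20)).toEqual([0, 1, 2, -1, 19]);
	});
});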
<script lang="ts"> import { webSearchParameters } from "$lib/stores/webSearchParameters"; import CarbonInformation from "~icons/carbon/information"; import Switch from "./Switch.svelte"; const toggle = () => ($webSearchParameters.useSearch = !$webSearchParameters.useSearch); </script> <div class="flex h-8 cursor-pointer select-none items-center gap-2 rounded-lg border bg-white p-1.5 shadow-sm hover:shadow-none dark:border-gray-800 dark:bg-gray-900" onclick={toggle} onkeydown={toggle} aria-checked={$webSearchParameters.useSearch} aria-label="Web Search Toggle" role="switch" tabindex="0" > <Switch name="useSearch" bind:checked={$webSearchParameters.useSearch} /> <label for="useSearch" class="whitespace-nowrap text-sm text-gray-800 dark:text-gray-200"> Search web </label> <div class="group relative w-max"> <CarbonInformation class="text-xs text-gray-500" /> <div class="pointer-events-none absolute -top-20 left-1/2 w-max -translate-x-1/2 rounded-md bg-gray-100 p-2 opacity-0 transition-opacity group-hover:opacity-100 dark:bg-gray-800" > <p class="max-w-sm text-sm text-gray-800 dark:text-gray-200"> When enabled, the model will try to complement its answer with information queried from the web. </p> </div> </div> </div>
chat-ui/src/lib/components/WebSearchToggle.svelte/0
{ "file_path": "chat-ui/src/lib/components/WebSearchToggle.svelte", "repo_id": "chat-ui", "token_count": 448 }
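The toggle binds to $webSearchParameters.useSearch; a minimal store that satisfies that usage could look like the following sketch (any field beyond useSearch, such as nItems, is an assumption).

// Sketch of $lib/stores/webSearchParameters; the shape is inferred from the component above.
import { writable } from "svelte/store";

interface WebSearchParameters {
	useSearch: boolean;
	nItems: number; // hypothetical extra knob: number of results to fetch
}

export const webSearchParameters = writable<WebSearchParameters>({
	useSearch: false,
	nItems: 5,
});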
<script lang="ts"> interface Props { classNames?: string; } let { classNames = "" }: Props = $props(); </script> <svg xmlns="http://www.w3.org/2000/svg" width="1em" height="1em" class={classNames} fill="none" viewBox="0 0 26 23" > <path fill="url(#a)" d="M.93 10.65A10.17 10.17 0 0 1 11.11.48h4.67a9.45 9.45 0 0 1 0 18.89H4.53L1.62 22.2a.38.38 0 0 1-.69-.28V10.65Z" /> <path fill="#000" fill-rule="evenodd" d="M11.52 7.4a1.86 1.86 0 1 1-3.72 0 1.86 1.86 0 0 1 3.72 0Zm7.57 0a1.86 1.86 0 1 1-3.73 0 1.86 1.86 0 0 1 3.73 0ZM8.9 12.9a.55.55 0 0 0-.11.35.76.76 0 0 1-1.51 0c0-.95.67-1.94 1.76-1.94 1.09 0 1.76 1 1.76 1.94H9.3a.55.55 0 0 0-.12-.35c-.06-.07-.1-.08-.13-.08s-.08 0-.14.08Zm4.04 0a.55.55 0 0 0-.12.35h-1.51c0-.95.68-1.94 1.76-1.94 1.1 0 1.77 1 1.77 1.94h-1.51a.55.55 0 0 0-.12-.35c-.06-.07-.11-.08-.14-.08-.02 0-.07 0-.13.08Zm-1.89.79c-.02 0-.07-.01-.13-.08a.55.55 0 0 1-.12-.36h-1.5c0 .95.67 1.95 1.75 1.95 1.1 0 1.77-1 1.77-1.95h-1.51c0 .16-.06.28-.12.36-.06.07-.11.08-.14.08Zm4.04 0c-.03 0-.08-.01-.14-.08a.55.55 0 0 1-.12-.36h-1.5c0 .95.67 1.95 1.76 1.95 1.08 0 1.76-1 1.76-1.95h-1.51c0 .16-.06.28-.12.36-.06.07-.11.08-.13.08Zm1.76-.44c0-.16.05-.28.12-.35.06-.07.1-.08.13-.08s.08 0 .14.08c.06.07.11.2.11.35a.76.76 0 0 0 1.51 0c0-.95-.67-1.94-1.76-1.94-1.09 0-1.76 1-1.76 1.94h1.5Z" clip-rule="evenodd" /> <defs> <radialGradient id="a" cx="0" cy="0" r="1" gradientTransform="matrix(0 31.37 -34.85 0 13.08 -9.02)" gradientUnits="userSpaceOnUse" > <stop stop-color="#FFD21E" /> <stop offset="1" stop-color="red" /> </radialGradient> </defs> </svg>
chat-ui/src/lib/components/icons/IconDazzled.svelte/0
{ "file_path": "chat-ui/src/lib/components/icons/IconDazzled.svelte", "repo_id": "chat-ui", "token_count": 941 }
import { afterEach, assert, describe, expect, it } from "vitest"; import { migrations } from "./routines"; import { acquireLock, isDBLocked, refreshLock, releaseLock } from "./lock"; import { collections } from "$lib/server/database"; const LOCK_KEY = "migrations.test"; describe("migrations", () => { it("should not have duplicates guid", async () => { const guids = migrations.map((m) => m._id.toString()); const uniqueGuids = [...new Set(guids)]; expect(uniqueGuids.length).toBe(guids.length); }); it("should acquire only one lock on DB", async () => { const results = await Promise.all(new Array(1000).fill(0).map(() => acquireLock(LOCK_KEY))); const locks = results.filter((r) => r); const semaphores = await collections.semaphores.find({}).toArray(); expect(locks.length).toBe(1); expect(semaphores).toBeDefined(); expect(semaphores.length).toBe(1); expect(semaphores?.[0].key).toBe(LOCK_KEY); }); it("should read the lock correctly", async () => { const lockId = await acquireLock(LOCK_KEY); assert(lockId); expect(await isDBLocked(LOCK_KEY)).toBe(true); expect(!!(await acquireLock(LOCK_KEY))).toBe(false); await releaseLock(LOCK_KEY, lockId); expect(await isDBLocked(LOCK_KEY)).toBe(false); }); it("should refresh the lock", async () => { const lockId = await acquireLock(LOCK_KEY); assert(lockId); // get the updatedAt time const updatedAtInitially = (await collections.semaphores.findOne({}))?.updatedAt; await refreshLock(LOCK_KEY, lockId); const updatedAtAfterRefresh = (await collections.semaphores.findOne({}))?.updatedAt; expect(updatedAtInitially).toBeDefined(); expect(updatedAtAfterRefresh).toBeDefined(); expect(updatedAtInitially).not.toBe(updatedAtAfterRefresh); }); }); afterEach(async () => { await collections.semaphores.deleteMany({}); await collections.migrationResults.deleteMany({}); });
chat-ui/src/lib/migrations/migrations.spec.ts/0
{ "file_path": "chat-ui/src/lib/migrations/migrations.spec.ts", "repo_id": "chat-ui", "token_count": 665 }
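The spec only exercises the lock API's observable behavior. A rough sketch of primitives that would pass it, assuming a unique index on semaphores.key, is shown below; the repository's real lock.ts may differ in details such as the lock id type or extra semaphore fields.

// Sketch only: a unique index on { key: 1 } makes concurrent acquireLock calls race safely.
import { ObjectId } from "mongodb";
import { collections } from "$lib/server/database";

export async function acquireLock(key: string): Promise<ObjectId | false> {
	try {
		const res = await collections.semaphores.insertOne({
			_id: new ObjectId(),
			key,
			createdAt: new Date(),
			updatedAt: new Date(),
		});
		return res.insertedId;
	} catch {
		return false; // duplicate key error: another process already holds the lock
	}
}

export async function isDBLocked(key: string): Promise<boolean> {
	return (await collections.semaphores.countDocuments({ key })) > 0;
}

export async function refreshLock(key: string, lockId: ObjectId): Promise<void> {
	await collections.semaphores.updateOne({ _id: lockId, key }, { $set: { updatedAt: new Date() } });
}

export async function releaseLock(key: string, lockId: ObjectId): Promise<void> {
	await collections.semaphores.deleteOne({ _id: lockId, key });
}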
import { z } from "zod";

import {
	embeddingEndpointTei,
	embeddingEndpointTeiParametersSchema,
} from "./tei/embeddingEndpoints";
import {
	embeddingEndpointTransformersJS,
	embeddingEndpointTransformersJSParametersSchema,
} from "./transformersjs/embeddingEndpoints";
import {
	embeddingEndpointOpenAI,
	embeddingEndpointOpenAIParametersSchema,
} from "./openai/embeddingEndpoints";
import { embeddingEndpointHfApi, embeddingEndpointHfApiSchema } from "./hfApi/embeddingHfApi";

// parameters passed when generating embeddings
interface EmbeddingEndpointParameters {
	inputs: string[];
}

export type Embedding = number[];

// type signature for the endpoint
export type EmbeddingEndpoint = (params: EmbeddingEndpointParameters) => Promise<Embedding[]>;

export const embeddingEndpointSchema = z.discriminatedUnion("type", [
	embeddingEndpointTeiParametersSchema,
	embeddingEndpointTransformersJSParametersSchema,
	embeddingEndpointOpenAIParametersSchema,
	embeddingEndpointHfApiSchema,
]);

type EmbeddingEndpointTypeOptions = z.infer<typeof embeddingEndpointSchema>["type"];

// generator function that takes in the type discriminator value and returns the matching endpoint
export type EmbeddingEndpointGenerator<T extends EmbeddingEndpointTypeOptions> = (
	inputs: Extract<z.infer<typeof embeddingEndpointSchema>, { type: T }>
) => EmbeddingEndpoint | Promise<EmbeddingEndpoint>;

// list of all endpoint generators
export const embeddingEndpoints: {
	[Key in EmbeddingEndpointTypeOptions]: EmbeddingEndpointGenerator<Key>;
} = {
	tei: embeddingEndpointTei,
	transformersjs: embeddingEndpointTransformersJS,
	openai: embeddingEndpointOpenAI,
	hfapi: embeddingEndpointHfApi,
};

export default embeddingEndpoints;
chat-ui/src/lib/server/embeddingEndpoints/embeddingEndpoints.ts/0
{ "file_path": "chat-ui/src/lib/server/embeddingEndpoints/embeddingEndpoints.ts", "repo_id": "chat-ui", "token_count": 544 }
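A sketch of instantiating one generator from the registry and embedding a couple of strings; the concrete fields accepted by the TEI parameter schema (url, weight) are assumptions, since that schema lives in another file, hence the cast.

// Usage sketch for the registry above.
import embeddingEndpoints, {
	type Embedding,
	type EmbeddingEndpoint,
} from "$lib/server/embeddingEndpoints/embeddingEndpoints";

async function embedSamples(): Promise<Embedding[]> {
	// Config fields are illustrative; only the "tei" discriminator is known from the union above.
	const config = { type: "tei", url: "http://127.0.0.1:8080", weight: 1 };
	const endpoint: EmbeddingEndpoint = await embeddingEndpoints.tei(
		config as unknown as Parameters<(typeof embeddingEndpoints)["tei"]>[0]
	);
	return endpoint({ inputs: ["hello world", "chat-ui"] });
}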
import { VertexAI, HarmCategory, HarmBlockThreshold, type Content, type TextPart, } from "@google-cloud/vertexai"; import type { Endpoint, TextGenerationStreamOutputWithToolsAndWebSources } from "../endpoints"; import { z } from "zod"; import type { Message } from "$lib/types/Message"; import { createImageProcessorOptionsValidator, makeImageProcessor } from "../images"; import { createDocumentProcessorOptionsValidator, makeDocumentProcessor } from "../document"; export const endpointVertexParametersSchema = z.object({ weight: z.number().int().positive().default(1), model: z.any(), // allow optional and validate against emptiness type: z.literal("vertex"), location: z.string().default("europe-west1"), extraBody: z.object({ model_version: z.string() }).optional(), project: z.string(), apiEndpoint: z.string().optional(), safetyThreshold: z .enum([ HarmBlockThreshold.HARM_BLOCK_THRESHOLD_UNSPECIFIED, HarmBlockThreshold.BLOCK_LOW_AND_ABOVE, HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE, HarmBlockThreshold.BLOCK_NONE, HarmBlockThreshold.BLOCK_ONLY_HIGH, ]) .optional(), tools: z.array(z.any()).optional(), multimodal: z .object({ image: createImageProcessorOptionsValidator({ supportedMimeTypes: [ "image/png", "image/jpeg", "image/webp", "image/avif", "image/tiff", "image/gif", ], preferredMimeType: "image/webp", maxSizeInMB: 20, maxWidth: 4096, maxHeight: 4096, }), document: createDocumentProcessorOptionsValidator({ supportedMimeTypes: ["application/pdf", "text/plain"], maxSizeInMB: 20, }), }) .default({}), }); export function endpointVertex(input: z.input<typeof endpointVertexParametersSchema>): Endpoint { const { project, location, model, apiEndpoint, safetyThreshold, tools, multimodal, extraBody } = endpointVertexParametersSchema.parse(input); const vertex_ai = new VertexAI({ project, location, apiEndpoint, }); return async ({ messages, preprompt, generateSettings }) => { const parameters = { ...model.parameters, ...generateSettings }; const hasFiles = messages.some((message) => message.files && message.files.length > 0); const generativeModel = vertex_ai.getGenerativeModel({ model: extraBody?.model_version ?? model.id ?? model.name, safetySettings: safetyThreshold ? [ { category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT, threshold: safetyThreshold, }, { category: HarmCategory.HARM_CATEGORY_HARASSMENT, threshold: safetyThreshold, }, { category: HarmCategory.HARM_CATEGORY_HATE_SPEECH, threshold: safetyThreshold, }, { category: HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT, threshold: safetyThreshold, }, { category: HarmCategory.HARM_CATEGORY_UNSPECIFIED, threshold: safetyThreshold, }, ] : undefined, generationConfig: { maxOutputTokens: parameters?.max_new_tokens ?? 4096, stopSequences: parameters?.stop, temperature: parameters?.temperature ?? 1, }, // tools and multimodal are mutually exclusive tools: !hasFiles ? tools : undefined, }); // Preprompt is the same as the first system message. let systemMessage = preprompt; if (messages[0].from === "system") { systemMessage = messages[0].content; messages.shift(); } const vertexMessages = await Promise.all( messages.map(async ({ from, content, files }: Omit<Message, "id">): Promise<Content> => { const imageProcessor = makeImageProcessor(multimodal.image); const documentProcessor = makeDocumentProcessor(multimodal.document); const processedFilesWithNull = files && files.length > 0 ? 
await Promise.all( files.map(async (file) => { if (file.mime.includes("image")) { const { image, mime } = await imageProcessor(file); return { file: image, mime }; } else if (file.mime === "application/pdf" || file.mime === "text/plain") { return documentProcessor(file); } return null; }) ) : []; const processedFiles = processedFilesWithNull.filter((file) => file !== null); return { role: from === "user" ? "user" : "model", parts: [ ...processedFiles.map((processedFile) => ({ inlineData: { data: processedFile.file.toString("base64"), mimeType: processedFile.mime, }, })), { text: content, }, ], }; }) ); const result = await generativeModel.generateContentStream({ contents: vertexMessages, systemInstruction: systemMessage ? { role: "system", parts: [ { text: systemMessage, }, ], } : undefined, }); let tokenId = 0; return (async function* () { let generatedText = ""; const webSources = []; for await (const data of result.stream) { if (!data?.candidates?.length) break; // Handle case where no candidates are present const candidate = data.candidates[0]; if (!candidate.content?.parts?.length) continue; // Skip if no parts are present const firstPart = candidate.content.parts.find((part) => "text" in part) as | TextPart | undefined; if (!firstPart) continue; // Skip if no text part is found const isLastChunk = !!candidate.finishReason; const candidateWebSources = candidate.groundingMetadata?.groundingChunks ?.map((chunk) => { const uri = chunk.web?.uri ?? chunk.retrievedContext?.uri; const title = chunk.web?.title ?? chunk.retrievedContext?.title; if (!uri || !title) { return null; } return { uri, title, }; }) .filter((source) => source !== null); if (candidateWebSources) { webSources.push(...candidateWebSources); } const content = firstPart.text; generatedText += content; const output: TextGenerationStreamOutputWithToolsAndWebSources = { token: { id: tokenId++, text: content, logprob: 0, special: isLastChunk, }, generated_text: isLastChunk ? generatedText : null, details: null, webSources, }; yield output; if (isLastChunk) break; } })(); }; } export default endpointVertex;
chat-ui/src/lib/server/endpoints/google/endpointVertex.ts/0
{ "file_path": "chat-ui/src/lib/server/endpoints/google/endpointVertex.ts", "repo_id": "chat-ui", "token_count": 2595 }
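A sketch of constructing the Vertex endpoint and draining its stream. Only the fields the code above actually reads are filled in, and the Message objects are trimmed for brevity, hence the cast; the project id and model id are placeholder values.

// Usage sketch for endpointVertex (illustrative values only).
import endpointVertex from "./endpointVertex";

async function generateOnce(): Promise<string> {
	const endpoint = endpointVertex({
		type: "vertex",
		project: "my-gcp-project", // assumed GCP project id
		location: "europe-west1",
		model: { id: "gemini-1.5-pro", parameters: {} }, // only `id` and `parameters` are read above
	});

	const stream = await endpoint({
		preprompt: "You are a helpful assistant.",
		messages: [{ from: "user", content: "Hello!", files: [] }],
		generateSettings: { temperature: 0.7 },
	} as unknown as Parameters<typeof endpoint>[0]); // Message shape trimmed for the sketch

	let text = "";
	for await (const output of stream) {
		text += output.token.text;
		if (output.generated_text !== null) break; // the final chunk carries the full text
	}
	return text;
}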
import { runWebSearch } from "$lib/server/websearch/runWebSearch"; import { preprocessMessages } from "../endpoints/preprocessMessages"; import { generateTitleForConversation } from "./title"; import { assistantHasDynamicPrompt, assistantHasWebSearch, getAssistantById, processPreprompt, } from "./assistant"; import { getTools, runTools } from "./tools"; import type { WebSearch } from "$lib/types/WebSearch"; import { type MessageUpdate, MessageUpdateType, MessageUpdateStatus, } from "$lib/types/MessageUpdate"; import { generate } from "./generate"; import { mergeAsyncGenerators } from "$lib/utils/mergeAsyncGenerators"; import type { TextGenerationContext } from "./types"; import type { ToolResult } from "$lib/types/Tool"; import { toolHasName } from "../tools/utils"; import directlyAnswer from "../tools/directlyAnswer"; async function* keepAlive(done: AbortSignal): AsyncGenerator<MessageUpdate, undefined, undefined> { while (!done.aborted) { yield { type: MessageUpdateType.Status, status: MessageUpdateStatus.KeepAlive, }; await new Promise((resolve) => setTimeout(resolve, 100)); } } export async function* textGeneration(ctx: TextGenerationContext) { const done = new AbortController(); const titleGen = generateTitleForConversation(ctx.conv); const textGen = textGenerationWithoutTitle(ctx, done); const keepAliveGen = keepAlive(done.signal); // keep alive until textGen is done yield* mergeAsyncGenerators([titleGen, textGen, keepAliveGen]); } async function* textGenerationWithoutTitle( ctx: TextGenerationContext, done: AbortController ): AsyncGenerator<MessageUpdate, undefined, undefined> { yield { type: MessageUpdateType.Status, status: MessageUpdateStatus.Started, }; ctx.assistant ??= await getAssistantById(ctx.conv.assistantId); const { model, conv, messages, assistant, isContinue, webSearch, toolsPreference } = ctx; const convId = conv._id; let webSearchResult: WebSearch | undefined; // run websearch if: // - it's not continuing a previous message // - AND the model doesn't support tools and websearch is selected // - OR the assistant has websearch enabled (no tools for assistants for now) if (!isContinue && ((webSearch && !conv.assistantId) || assistantHasWebSearch(assistant))) { webSearchResult = yield* runWebSearch(conv, messages, assistant?.rag); } let preprompt = conv.preprompt; if (assistantHasDynamicPrompt(assistant) && preprompt) { preprompt = await processPreprompt(preprompt, messages.at(-1)?.content); if (messages[0].from === "system") messages[0].content = preprompt; } let toolResults: ToolResult[] = []; let tools = model.tools ? await getTools(toolsPreference, ctx.assistant) : undefined; if (tools) { const toolCallsRequired = tools.some((tool) => !toolHasName(directlyAnswer.name, tool)); if (toolCallsRequired) { toolResults = yield* runTools(ctx, tools, preprompt); } else tools = undefined; } const processedMessages = await preprocessMessages(messages, webSearchResult, convId); yield* generate({ ...ctx, messages: processedMessages }, toolResults, preprompt); done.abort(); }
chat-ui/src/lib/server/textGeneration/index.ts/0
{ "file_path": "chat-ui/src/lib/server/textGeneration/index.ts", "repo_id": "chat-ui", "token_count": 960 }
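A sketch of how a caller could consume the merged update stream while skipping the keep-alive pings; building a real TextGenerationContext is elided since its definition lives elsewhere.

// Consumption sketch for the generator above.
import { textGeneration } from "$lib/server/textGeneration";
import type { TextGenerationContext } from "$lib/server/textGeneration/types";
import {
	MessageUpdateStatus,
	MessageUpdateType,
	type MessageUpdate,
} from "$lib/types/MessageUpdate";

async function collectUpdates(ctx: TextGenerationContext): Promise<MessageUpdate[]> {
	const updates: MessageUpdate[] = [];
	for await (const update of textGeneration(ctx)) {
		const isKeepAlive =
			update.type === MessageUpdateType.Status && update.status === MessageUpdateStatus.KeepAlive;
		if (!isKeepAlive) updates.push(update); // keep-alives are interleaved roughly every 100ms
	}
	return updates;
}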
import { collapseString, sanitizeString } from "./utils/nlp"; import { stringifyHTMLElements, stringifyHTMLElementsUnformatted } from "./utils/stringify"; import { MarkdownElementType, tagNameMap, type HeaderElement, type MarkdownElement } from "./types"; import type { SerializedHTMLElement } from "../scrape/types"; interface ConversionState { defaultType: | MarkdownElementType.Paragraph | MarkdownElementType.BlockQuote | MarkdownElementType.UnorderedListItem | MarkdownElementType.OrderedListItem; listDepth: number; blockQuoteDepth: number; } export function htmlElementToMarkdownElements( parent: HeaderElement, elem: SerializedHTMLElement | string, prevState: ConversionState = { defaultType: MarkdownElementType.Paragraph, listDepth: 0, blockQuoteDepth: 0, } ): MarkdownElement | MarkdownElement[] { // Found text so create an element based on the previous state if (typeof elem === "string") { if (elem.trim().length === 0) return []; if ( prevState.defaultType === MarkdownElementType.UnorderedListItem || prevState.defaultType === MarkdownElementType.OrderedListItem ) { return { parent, type: prevState.defaultType, content: elem, depth: prevState.listDepth, }; } if (prevState.defaultType === MarkdownElementType.BlockQuote) { return { parent, type: prevState.defaultType, content: elem, depth: prevState.blockQuoteDepth, }; } return { parent, type: prevState.defaultType, content: elem }; } const type = tagNameMap[elem.tagName] ?? MarkdownElementType.Paragraph; // Update the state based on the current element const state: ConversionState = { ...prevState }; if (type === MarkdownElementType.UnorderedList || type === MarkdownElementType.OrderedList) { state.listDepth += 1; state.defaultType = type === MarkdownElementType.UnorderedList ? MarkdownElementType.UnorderedListItem : MarkdownElementType.OrderedListItem; } if (type === MarkdownElementType.BlockQuote) { state.defaultType = MarkdownElementType.BlockQuote; state.blockQuoteDepth += 1; } // Headers if (type === MarkdownElementType.Header) { return { parent, type, level: Number(elem.tagName[1]), content: collapseString(stringifyHTMLElements(elem.content)), children: [], }; } // Code blocks if (type === MarkdownElementType.CodeBlock) { return { parent, type, content: sanitizeString(stringifyHTMLElementsUnformatted(elem.content)), }; } // Typical case, we want to flatten the DOM and only create elements when we see text return elem.content.flatMap((el) => htmlElementToMarkdownElements(parent, el, state)); } export function mergeAdjacentElements(elements: MarkdownElement[]): MarkdownElement[] { return elements.reduce<MarkdownElement[]>((acc, elem) => { const last = acc[acc.length - 1]; if (last && last.type === MarkdownElementType.Paragraph && last.type === elem.type) { last.content += elem.content; return acc; } return [...acc, elem]; }, []); }
chat-ui/src/lib/server/websearch/markdown/fromHtml.ts/0
{ "file_path": "chat-ui/src/lib/server/websearch/markdown/fromHtml.ts", "repo_id": "chat-ui", "token_count": 1033 }
import { env } from "$env/dynamic/private";
import { isURL } from "$lib/utils/isUrl";
import type { WebSearchSource } from "$lib/types/WebSearch";

type SerpStackResponse = {
	organic_results: {
		title: string;
		url: string;
		snippet?: string;
	}[];
	error?: string;
};

export default async function searchSerpStack(query: string): Promise<WebSearchSource[]> {
	// encode the query so spaces and special characters survive URL interpolation
	const response = await fetch(
		`http://api.serpstack.com/search?access_key=${env.SERPSTACK_API_KEY}&query=${encodeURIComponent(
			query
		)}&hl=en&gl=us`,
		{ headers: { "Content-type": "application/json; charset=UTF-8" } }
	);

	const data = (await response.json()) as SerpStackResponse;

	if (!response.ok) {
		throw new Error(
			data.error ??
				`SerpStack API returned error code ${response.status} - ${response.statusText}`
		);
	}

	return data.organic_results
		.filter(({ url }) => isURL(url))
		.map(({ title, url, snippet }) => ({
			title,
			link: url,
			text: snippet ?? "",
		}));
}
chat-ui/src/lib/server/websearch/search/endpoints/serpStack.ts/0
{ "file_path": "chat-ui/src/lib/server/websearch/search/endpoints/serpStack.ts", "repo_id": "chat-ui", "token_count": 344 }
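A small vitest sketch pinning the mapping behavior by stubbing fetch; it assumes the test environment provides SERPSTACK_API_KEY, since the endpoint reads it from $env/dynamic/private.

// Test sketch for searchSerpStack (fetch is stubbed, so no real API call is made).
import { expect, it, vi } from "vitest";
import searchSerpStack from "./serpStack";

it("maps organic results to web search sources", async () => {
	const payload = {
		organic_results: [
			{ title: "Hugging Face", url: "https://huggingface.co", snippet: "The AI community" },
		],
	};
	vi.stubGlobal("fetch", vi.fn(async () => new Response(JSON.stringify(payload))));

	expect(await searchSerpStack("hugging face")).toEqual([
		{ title: "Hugging Face", link: "https://huggingface.co", text: "The AI community" },
	]);

	vi.unstubAllGlobals();
});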
import type { Conversation } from "./Conversation"; export type SharedConversation = Pick< Conversation, | "model" | "embeddingModel" | "title" | "rootMessageId" | "messages" | "preprompt" | "assistantId" | "createdAt" | "updatedAt" > & { _id: string; hash: string; };
chat-ui/src/lib/types/SharedConversation.ts/0
{ "file_path": "chat-ui/src/lib/types/SharedConversation.ts", "repo_id": "chat-ui", "token_count": 114 }
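Since the type is a straight Pick plus share-specific fields, deriving a share document from a conversation is mostly field copying; the id and hash arguments below are placeholders, not the app's real generation scheme.

// Sketch: building a SharedConversation from an existing Conversation.
import type { Conversation } from "./Conversation";
import type { SharedConversation } from "./SharedConversation";

export function toSharedConversation(
	conv: Conversation,
	id: string, // placeholder: the app decides how share ids are generated
	hash: string // placeholder: e.g. a digest of the messages
): SharedConversation {
	const { model, embeddingModel, title, rootMessageId, messages, preprompt, assistantId } = conv;
	return {
		_id: id,
		hash,
		model,
		embeddingModel,
		title,
		rootMessageId,
		messages,
		preprompt,
		assistantId,
		createdAt: new Date(),
		updatedAt: new Date(),
	};
}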
/** Takes an unknown error and attempts to convert it to a string */ export function stringifyError(error: unknown): string { if (error instanceof Error) return error.message; if (typeof error === "string") return error; if (typeof error === "object" && error !== null) { // try a few common properties if ("message" in error && typeof error.message === "string") return error.message; if ("body" in error && typeof error.body === "string") return error.body; if ("name" in error && typeof error.name === "string") return error.name; } return "Unknown error"; }
chat-ui/src/lib/utils/stringifyError.ts/0
{ "file_path": "chat-ui/src/lib/utils/stringifyError.ts", "repo_id": "chat-ui", "token_count": 167 }
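A behavior illustration for the helper above, written as a vitest sketch; every expectation follows directly from the branches in stringifyError.

import { describe, expect, it } from "vitest";
import { stringifyError } from "./stringifyError";

describe("stringifyError", () => {
	it("unwraps the usual error shapes", () => {
		expect(stringifyError(new Error("boom"))).toBe("boom");
		expect(stringifyError("plain string")).toBe("plain string");
		expect(stringifyError({ message: "from message" })).toBe("from message");
		expect(stringifyError({ body: "from body" })).toBe("from body");
		expect(stringifyError(42)).toBe("Unknown error");
	});
});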
import { collections } from "$lib/server/database"; import { ObjectId } from "mongodb"; import { describe, expect, it } from "vitest"; // function used to insert conversations used for testing export const insertLegacyConversation = async () => { const res = await collections.conversations.insertOne({ _id: new ObjectId(), createdAt: new Date(), updatedAt: new Date(), title: "legacy conversation", model: "", embeddingModel: "", messages: [ { id: "1-1-1-1-1", from: "user", content: "Hello, world! I am a user", }, { id: "1-1-1-1-2", from: "assistant", content: "Hello, world! I am an assistant.", }, { id: "1-1-1-1-3", from: "user", content: "Hello, world! I am a user.", }, { id: "1-1-1-1-4", from: "assistant", content: "Hello, world! I am an assistant.", }, ], }); return res.insertedId; }; export const insertLinearBranchConversation = async () => { const res = await collections.conversations.insertOne({ _id: new ObjectId(), createdAt: new Date(), updatedAt: new Date(), title: "linear branch conversation", model: "", embeddingModel: "", rootMessageId: "1-1-1-1-1", messages: [ { id: "1-1-1-1-1", from: "user", content: "Hello, world! I am a user", ancestors: [], children: ["1-1-1-1-2"], }, { id: "1-1-1-1-2", from: "assistant", content: "Hello, world! I am an assistant.", ancestors: ["1-1-1-1-1"], children: ["1-1-1-1-3"], }, { id: "1-1-1-1-3", from: "user", content: "Hello, world! I am a user.", ancestors: ["1-1-1-1-1", "1-1-1-1-2"], children: ["1-1-1-1-4"], }, { id: "1-1-1-1-4", from: "assistant", content: "Hello, world! I am an assistant.", ancestors: ["1-1-1-1-1", "1-1-1-1-2", "1-1-1-1-3"], children: [], }, ], }); return res.insertedId; }; export const insertSideBranchesConversation = async () => { const res = await collections.conversations.insertOne({ _id: new ObjectId(), createdAt: new Date(), updatedAt: new Date(), title: "side branches conversation", model: "", embeddingModel: "", rootMessageId: "1-1-1-1-1", messages: [ { id: "1-1-1-1-1", from: "user", content: "Hello, world, root message!", ancestors: [], children: ["1-1-1-1-2", "1-1-1-1-5"], }, { id: "1-1-1-1-2", from: "assistant", content: "Hello, response to root message!", ancestors: ["1-1-1-1-1"], children: ["1-1-1-1-3"], }, { id: "1-1-1-1-3", from: "user", content: "Hello, follow up question!", ancestors: ["1-1-1-1-1", "1-1-1-1-2"], children: ["1-1-1-1-4"], }, { id: "1-1-1-1-4", from: "assistant", content: "Hello, response from follow up question!", ancestors: ["1-1-1-1-1", "1-1-1-1-2", "1-1-1-1-3"], children: [], }, { id: "1-1-1-1-5", from: "assistant", content: "Hello, alternative assistant answer!", ancestors: ["1-1-1-1-1"], children: ["1-1-1-1-6", "1-1-1-1-7"], }, { id: "1-1-1-1-6", from: "user", content: "Hello, follow up question to alternative answer!", ancestors: ["1-1-1-1-1", "1-1-1-1-5"], children: [], }, { id: "1-1-1-1-7", from: "user", content: "Hello, alternative follow up question to alternative answer!", ancestors: ["1-1-1-1-1", "1-1-1-1-5"], children: [], }, ], }); return res.insertedId; }; describe("inserting conversations", () => { it("should insert a legacy conversation", async () => { const id = await insertLegacyConversation(); expect(id).toBeDefined(); }); it("should insert a linear branch conversation", async () => { const id = await insertLinearBranchConversation(); expect(id).toBeDefined(); }); it("should insert a side branches conversation", async () => { const id = await insertSideBranchesConversation(); expect(id).toBeDefined(); }); });
chat-ui/src/lib/utils/tree/treeHelpers.spec.ts/0
{ "file_path": "chat-ui/src/lib/utils/tree/treeHelpers.spec.ts", "repo_id": "chat-ui", "token_count": 1864 }
import { authCondition } from "$lib/server/auth"; import type { Conversation } from "$lib/types/Conversation"; import { collections } from "$lib/server/database"; import { ObjectId } from "mongodb"; export async function GET({ locals }) { if (locals.user?._id || locals.sessionId) { const settings = await collections.settings.findOne(authCondition(locals)); const conversations = await collections.conversations .find(authCondition(locals)) .sort({ updatedAt: -1 }) .project<Pick<Conversation, "assistantId">>({ assistantId: 1, }) .limit(300) .toArray(); const userAssistants = settings?.assistants?.map((assistantId) => assistantId.toString()) ?? []; const userAssistantsSet = new Set(userAssistants); const assistantIds = [ ...userAssistants.map((el) => new ObjectId(el)), ...(conversations.map((conv) => conv.assistantId).filter((el) => !!el) as ObjectId[]), ]; const assistants = await collections.assistants.find({ _id: { $in: assistantIds } }).toArray(); const res = assistants .filter((el) => userAssistantsSet.has(el._id.toString())) .map((el) => ({ ...el, _id: el._id.toString(), createdById: undefined, createdByMe: el.createdById.toString() === (locals.user?._id ?? locals.sessionId).toString(), })); return Response.json(res); } else { return Response.json({ message: "Must have session cookie" }, { status: 401 }); } }
chat-ui/src/routes/api/user/assistants/+server.ts/0
{ "file_path": "chat-ui/src/routes/api/user/assistants/+server.ts", "repo_id": "chat-ui", "token_count": 509 }
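A client-side sketch of calling this route; the element type is deliberately loose because the handler spreads whole assistant documents and only _id and createdByMe are guaranteed by the code above.

// Fetch sketch for the endpoint above.
import { base } from "$app/paths";

type UserAssistant = { _id: string; createdByMe: boolean } & Record<string, unknown>;

export async function fetchUserAssistants(): Promise<UserAssistant[]> {
	const res = await fetch(`${base}/api/user/assistants`);
	if (!res.ok) {
		throw new Error(`Failed to load assistants (${res.status})`); // 401 without a session cookie
	}
	return res.json();
}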
import { base } from "$app/paths"; import { authCondition } from "$lib/server/auth"; import { collections } from "$lib/server/database"; import { redirect } from "@sveltejs/kit"; export const actions = { async delete({ locals }) { // double check we have a user to delete conversations for if (locals.user?._id || locals.sessionId) { await collections.conversations.deleteMany({ ...authCondition(locals), }); } redirect(303, `${base}/`); }, };
chat-ui/src/routes/conversations/+page.server.ts/0
{ "file_path": "chat-ui/src/routes/conversations/+page.server.ts", "repo_id": "chat-ui", "token_count": 158 }
import { collections } from "$lib/server/database"; import { z } from "zod"; import { authCondition } from "$lib/server/auth"; import { DEFAULT_SETTINGS, type SettingsEditable } from "$lib/types/Settings"; import { toolFromConfigs } from "$lib/server/tools/index.js"; import { ObjectId } from "mongodb"; export async function POST({ request, locals }) { const body = await request.json(); const { ethicsModalAccepted, ...settings } = z .object({ shareConversationsWithModelAuthors: z .boolean() .default(DEFAULT_SETTINGS.shareConversationsWithModelAuthors), hideEmojiOnSidebar: z.boolean().default(DEFAULT_SETTINGS.hideEmojiOnSidebar), ethicsModalAccepted: z.boolean().optional(), activeModel: z.string().default(DEFAULT_SETTINGS.activeModel), customPrompts: z.record(z.string()).default({}), tools: z.array(z.string()).optional(), disableStream: z.boolean().default(false), directPaste: z.boolean().default(false), }) .parse(body) satisfies SettingsEditable; // make sure all tools exist // either in db or in config if (settings.tools) { const newTools = [ ...(await collections.tools .find({ _id: { $in: settings.tools.map((toolId) => new ObjectId(toolId)) } }) .project({ _id: 1 }) .toArray() .then((tools) => tools.map((tool) => tool._id.toString()))), ...toolFromConfigs .filter((el) => (settings?.tools ?? []).includes(el._id.toString())) .map((el) => el._id.toString()), ]; settings.tools = newTools; } await collections.settings.updateOne( authCondition(locals), { $set: { ...settings, ...(ethicsModalAccepted && { ethicsModalAcceptedAt: new Date() }), updatedAt: new Date(), }, $setOnInsert: { createdAt: new Date(), }, }, { upsert: true, } ); // return ok response return new Response(); }
chat-ui/src/routes/settings/(nav)/+server.ts/0
{ "file_path": "chat-ui/src/routes/settings/(nav)/+server.ts", "repo_id": "chat-ui", "token_count": 695 }
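A sketch of the matching client call; the body mirrors the zod schema above (every field is optional or defaulted, so partial payloads are accepted), and the model id is just an example value.

// Client-side sketch for POSTing settings to the route above.
import { base } from "$app/paths";

export async function saveSettings(): Promise<boolean> {
	const res = await fetch(`${base}/settings`, {
		method: "POST",
		headers: { "Content-Type": "application/json" },
		body: JSON.stringify({
			shareConversationsWithModelAuthors: true,
			hideEmojiOnSidebar: false,
			ethicsModalAccepted: true,
			activeModel: "meta-llama/Llama-3.3-70B-Instruct", // example model id
			customPrompts: {},
			disableStream: false,
			directPaste: false,
		}),
	});
	return res.ok; // the endpoint replies with an empty 200 on success
}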
import { env } from "$env/dynamic/private"; import { authCondition } from "$lib/server/auth.js"; import { Database, collections } from "$lib/server/database.js"; import { toolFromConfigs } from "$lib/server/tools/index.js"; import { SortKey } from "$lib/types/Assistant.js"; import { ReviewStatus } from "$lib/types/Review"; import type { CommunityToolDB } from "$lib/types/Tool.js"; import type { User } from "$lib/types/User.js"; import { generateQueryTokens, generateSearchTokens } from "$lib/utils/searchTokens.js"; import { error } from "@sveltejs/kit"; import { ObjectId, type Filter } from "mongodb"; const NUM_PER_PAGE = 16; export const load = async ({ url, locals }) => { if (env.COMMUNITY_TOOLS !== "true") { error(403, "Community tools are not enabled"); } const username = url.searchParams.get("user"); const query = url.searchParams.get("q")?.trim() ?? null; const pageIndex = parseInt(url.searchParams.get("p") ?? "0"); const sort = url.searchParams.get("sort")?.trim() ?? SortKey.TRENDING; const createdByCurrentUser = locals.user?.username && locals.user.username === username; const activeOnly = url.searchParams.get("active") === "true"; const showUnfeatured = url.searchParams.get("showUnfeatured") === "true"; let user: Pick<User, "_id"> | null = null; if (username) { user = await collections.users.findOne<Pick<User, "_id">>( { username }, { projection: { _id: 1 } } ); if (!user) { error(404, `User "${username}" doesn't exist`); } } const settings = await collections.settings.findOne(authCondition(locals)); if (!settings && activeOnly) { error(404, "No user settings found"); } const queryTokens = !!query && generateQueryTokens(query); const filter: Filter<CommunityToolDB> = { ...(!createdByCurrentUser && !activeOnly && !(locals.user?.isAdmin && showUnfeatured) && { review: ReviewStatus.APPROVED }), ...(user && { createdById: user._id }), ...(queryTokens && { searchTokens: { $all: queryTokens } }), ...(activeOnly && { _id: { $in: (settings?.tools ?? []).map((key) => { return new ObjectId(key); }), }, }), }; const communityTools = await Database.getInstance() .getCollections() .tools.find(filter) .skip(NUM_PER_PAGE * pageIndex) .sort({ ...(sort === SortKey.TRENDING && { last24HoursUseCount: -1 }), useCount: -1, }) .limit(NUM_PER_PAGE) .toArray(); const configTools = toolFromConfigs .filter((tool) => !tool?.isHidden) .filter((tool) => { if (queryTokens) { return generateSearchTokens(tool.displayName).some((token) => queryTokens.some((queryToken) => queryToken.test(token)) ); } return true; }); const tools = [...(pageIndex == 0 && !username ? configTools : []), ...communityTools]; const numTotalItems = (await Database.getInstance().getCollections().tools.countDocuments(filter)) + toolFromConfigs.length; return { tools: JSON.parse(JSON.stringify(tools)) as CommunityToolDB[], numTotalItems, numItemsPerPage: NUM_PER_PAGE, query, sort, showUnfeatured, }; };
chat-ui/src/routes/tools/+page.server.ts/0
{ "file_path": "chat-ui/src/routes/tools/+page.server.ts", "repo_id": "chat-ui", "token_count": 1098 }
{ "$schema": "https://vega.github.io/schema/vega-lite/v4.json", "data": { "values": "<DVC_METRIC_DATA>" }, "title": "<DVC_METRIC_TITLE>", "mark": "point", "encoding": { "x": { "field": "<DVC_METRIC_X>", "type": "quantitative", "title": "<DVC_METRIC_X_LABEL>" }, "y": { "field": "<DVC_METRIC_Y>", "type": "quantitative", "title": "<DVC_METRIC_Y_LABEL>", "scale": { "zero": false } }, "color": { "field": "rev", "type": "nominal" } } }
datasets/.dvc/plots/scatter.json/0
{ "file_path": "datasets/.dvc/plots/scatter.json", "repo_id": "datasets", "token_count": 402 }