Dataset schema (column name, type, observed value sizes):
code: string, lengths 66 to 870k
docstring: string, lengths 19 to 26.7k
func_name: string, lengths 1 to 138
language: string, 1 class
repo: string, lengths 7 to 68
path: string, lengths 5 to 324
url: string, lengths 46 to 389
license: string, 7 classes
def forward( self, hidden_states: torch.Tensor, original_hidden_states: torch.Tensor, layer_idx: int, attention_mask: Optional[torch.Tensor] = None, past_key_value: Optional[ZambaHybridDynamicCache] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, **kwargs: Unpack[FlashAttentionKwargs], ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: """ Args: hidden_states (`torch.FloatTensor`): output of previous Mamba layer of shape `(batch, seq_len, embed_dim)` original_hidden_states (`torch.FloatTensor`): word embedding output of shape `(batch, seq_len, embed_dim)`. This is concatenated with `hidden_states` (which is the output of the previous (mamba) layer). The concatenated tensor is then used as input of the pre-attention RMSNorm (see fig. 2 in https://arxiv.org/pdf/2405.16712). layer_idx (`int`): layer_idx in the forward pass. Used to distinguish Zamba's tied transformer layers. attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch, sequence_length)` where padding elements are indicated by 0. past_key_value (`ZambaHybridDynamicCache`, *optional*): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence. """ hidden_states = torch.concatenate([hidden_states, original_hidden_states], dim=-1) hidden_states = self.input_layernorm(hidden_states) hidden_states, self_attn_weights = self.self_attn( hidden_states=hidden_states, layer_idx=layer_idx, attention_mask=attention_mask, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, **kwargs, ) # feed-forward (MLP) hidden_states = self.pre_ff_layernorm(hidden_states) hidden_states = self.feed_forward(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) return outputs
Args: hidden_states (`torch.FloatTensor`): output of previous Mamba layer of shape `(batch, seq_len, embed_dim)` original_hidden_states (`torch.FloatTensor`): word embedding output of shape `(batch, seq_len, embed_dim)`. This is concatenated with `hidden_states` (which is the output of the previous (mamba) layer). The concatenated tensor is then used as input of the pre-attention RMSNorm (see fig. 2 in https://arxiv.org/pdf/2405.16712). layer_idx (`int`): layer_idx in the forward pass. Used to distinguish Zamba's tied transformer layers. attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch, sequence_length)` where padding elements are indicated by 0. past_key_value (`ZambaHybridDynamicCache`, *optional*): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence.
forward
python
huggingface/transformers
src/transformers/models/zamba/modeling_zamba.py
https://github.com/huggingface/transformers/blob/master/src/transformers/models/zamba/modeling_zamba.py
Apache-2.0
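The concatenation step in the row above doubles the channel dimension before the pre-attention norm. A minimal standalone sketch with toy sizes and an `nn.LayerNorm` stand-in for the model's RMSNorm (not the actual ZambaAttentionDecoderLayer):

```python
import torch
import torch.nn as nn

# Toy sizes; hidden_states is the previous mamba layer's output, original_hidden_states the
# word embedding output. Concatenating on the last dim doubles the channel count, so the
# pre-attention norm must be built with 2 * embed_dim features.
batch, seq_len, embed_dim = 2, 5, 8
hidden_states = torch.randn(batch, seq_len, embed_dim)
original_hidden_states = torch.randn(batch, seq_len, embed_dim)

concatenated = torch.concatenate([hidden_states, original_hidden_states], dim=-1)
print(concatenated.shape)  # torch.Size([2, 5, 16])

input_layernorm = nn.LayerNorm(2 * embed_dim)  # stand-in for the model's RMSNorm
print(input_layernorm(concatenated).shape)     # torch.Size([2, 5, 16])
```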
def forward( self, hidden_states: torch.Tensor, original_hidden_states: Optional[torch.Tensor] = None, layer_idx: Optional[int] = None, attention_mask: Optional[torch.Tensor] = None, causal_mask: Optional[torch.Tensor] = None, past_key_value: Optional[ZambaHybridDynamicCache] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, cache_position: Optional[torch.LongTensor] = None, transformer_hidden_states: Optional[torch.Tensor] = None, **kwargs, ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch, sequence_length)` where padding elements are indicated by 0. past_key_value (`ZambaHybridDynamicCache`, *optional*): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence. """ residual = hidden_states # `transformer_hidden_states` is the output from shared transformer + linear layer (see fig. 2 in https://arxiv.org/pdf/2405.16712). # `transformer_hidden_states` is then added to the input to the mamba layer below (as described in eq. (6) of https://arxiv.org/pdf/2405.16712). hidden_states = ( hidden_states + transformer_hidden_states if transformer_hidden_states is not None else hidden_states ) hidden_states = self.input_layernorm(hidden_states) hidden_states = self.mamba( hidden_states=hidden_states, cache_params=past_key_value, attention_mask=attention_mask, ) self_attn_weights = None # residual connection after mamba hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) if use_cache: outputs += (past_key_value,) return outputs
Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch, sequence_length)` where padding elements are indicated by 0. past_key_value (`ZambaHybridDynamicCache`, *optional*): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence.
forward
python
huggingface/transformers
src/transformers/models/zamba/modeling_zamba.py
https://github.com/huggingface/transformers/blob/master/src/transformers/models/zamba/modeling_zamba.py
Apache-2.0
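The mamba decoder row above adds the shared-transformer branch to its input and applies the residual after the mamba mixer. A minimal sketch of that wiring, with plain `nn.LayerNorm`/`nn.Linear` stand-ins for the real RMSNorm and mamba mixer:

```python
import torch
import torch.nn as nn

# Toy stand-ins: a LayerNorm for the RMSNorm and a Linear for the mamba mixer. Only the wiring
# (add transformer branch -> norm -> mixer -> residual) mirrors the row above.
batch, seq_len, embed_dim = 2, 5, 8
hidden_states = torch.randn(batch, seq_len, embed_dim)
transformer_hidden_states = torch.randn(batch, seq_len, embed_dim)  # output of shared transformer + linear

input_layernorm = nn.LayerNorm(embed_dim)
mamba_mixer = nn.Linear(embed_dim, embed_dim)

residual = hidden_states
x = hidden_states + transformer_hidden_states  # transformer branch added to the mamba input (eq. (6))
x = mamba_mixer(input_layernorm(x))
hidden_states = residual + x                   # residual connection after mamba
print(hidden_states.shape)                     # torch.Size([2, 5, 8])
```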
def forward( self, hidden_states: torch.Tensor, original_hidden_states: Optional[torch.Tensor] = None, layer_idx: Optional[int] = None, attention_mask: Optional[torch.Tensor] = None, causal_mask: Optional[torch.Tensor] = None, past_key_value: Optional[ZambaHybridDynamicCache] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, cache_position: Optional[torch.LongTensor] = None, ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` original_hidden_states (`torch.FloatTensor`): word embedding output that will be concatenated with hidden activations to form the input of the shared transformer layer. layer_idx (`int`): layer number. attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch, sequence_length)` where padding elements are indicated by 0. past_key_value (`ZambaHybridDynamicCache`, *optional*): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence. """ layer_outputs = self.shared_transf( hidden_states, original_hidden_states=original_hidden_states, layer_idx=layer_idx, attention_mask=causal_mask, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, ) transformer_hidden_states = layer_outputs[0] if output_attentions: self_attn_weights = layer_outputs[1] transformer_hidden_states = self.linear(transformer_hidden_states) layer_outputs = self.mamba_decoder( hidden_states, transformer_hidden_states=transformer_hidden_states, attention_mask=attention_mask, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, ) if output_attentions: layer_outputs = (layer_outputs[0], self_attn_weights) + layer_outputs[2:] return layer_outputs
Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` original_hidden_states (`torch.FloatTensor`): word embedding output that will be concatenated with hidden activations to form the input of the shared transformer layer. layer_idx (`int`): layer number. attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch, sequence_length)` where padding elements are indicated by 0. past_key_value (`ZambaHybridDynamicCache`, *optional*): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence.
forward
python
huggingface/transformers
src/transformers/models/zamba/modeling_zamba.py
https://github.com/huggingface/transformers/blob/master/src/transformers/models/zamba/modeling_zamba.py
Apache-2.0
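The hybrid layer row above chains the shared transformer block, a linear projection, and the mamba decoder. A dataflow-only sketch with toy stand-in modules (the real blocks also take masks, caches, and layer indices, omitted here):

```python
import torch
import torch.nn as nn

# Toy stand-ins for the three sub-blocks; only the dataflow mirrors the hybrid layer above.
batch, seq_len, embed_dim = 2, 5, 8
hidden_states = torch.randn(batch, seq_len, embed_dim)
original_hidden_states = torch.randn(batch, seq_len, embed_dim)

shared_transformer = nn.Linear(2 * embed_dim, embed_dim)  # stand-in: consumes the concatenated input
linear = nn.Linear(embed_dim, embed_dim)                  # projection back into the mamba stream
mamba_decoder = nn.Linear(embed_dim, embed_dim)           # stand-in for the mamba decoder layer

transformer_hidden_states = shared_transformer(
    torch.cat([hidden_states, original_hidden_states], dim=-1)
)
transformer_hidden_states = linear(transformer_hidden_states)
# The real mamba decoder receives transformer_hidden_states as a separate argument and adds it
# internally; here the addition is written out explicitly.
out = mamba_decoder(hidden_states + transformer_hidden_states)
print(out.shape)  # torch.Size([2, 5, 8])
```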
def _check_and_enable_flash_attn_2( cls, config, torch_dtype: Optional[torch.dtype] = None, device_map: Optional[Union[str, Dict[str, int]]] = None, hard_check_only: bool = False, check_device_map: bool = False, ): """ Overloads `PreTrainedModel._check_and_enable_flash_attn_2` so as to DISABLE Flash Attention 2 by default on Zamba models. Flash attention 2 is currently not supported in the HuggingFace implementation of Zamba v1. """ config = super()._check_and_enable_flash_attn_2( config, torch_dtype, device_map, hard_check_only=hard_check_only, check_device_map=check_device_map ) # if using the default path -> swap sdpa by eager if not hard_check_only and config._attn_implementation == "flash_attention_2": config._attn_implementation = "eager" return config
Overloads `PreTrainedModel._check_and_enable_flash_attn_2` so as to DISABLE Flash Attention 2 by default on Zamba models. Flash attention 2 is currently not supported in the HuggingFace implementation of Zamba v1.
_check_and_enable_flash_attn_2
python
huggingface/transformers
src/transformers/models/zamba/modeling_zamba.py
https://github.com/huggingface/transformers/blob/master/src/transformers/models/zamba/modeling_zamba.py
Apache-2.0
def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[ZambaHybridDynamicCache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, logits_to_keep: Union[int, torch.Tensor] = 0, **loss_kwargs, ) -> Union[Tuple, CausalLMOutputWithPast]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example: ```python >>> from transformers import AutoTokenizer, ZambaForCausalLM >>> model = ZambaForCausalLM.from_pretrained("Zyphra/Zamba-7B-v1") >>> tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba-7B-v1") >>> prompt = "Hey, are you conscious? Can you talk to me?" >>> inputs = tokenizer(prompt, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(inputs.input_ids, max_length=30) >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you." ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, cache_position=cache_position, return_dict=return_dict, ) hidden_states = outputs[0] # Only compute necessary logits, and do not upcast them to float if we are not computing the loss slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep logits = self.lm_head(hidden_states[:, slice_indices, :]) loss = None if labels is not None: loss = self.loss_function(logits, labels, self.vocab_size, **loss_kwargs) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example: ```python >>> from transformers import AutoTokenizer, ZambaForCausalLM >>> model = ZambaForCausalLM.from_pretrained("Zyphra/Zamba-7B-v1") >>> tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba-7B-v1") >>> prompt = "Hey, are you conscious? Can you talk to me?" >>> inputs = tokenizer(prompt, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(inputs.input_ids, max_length=30) >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you." ```
forward
python
huggingface/transformers
src/transformers/models/zamba/modeling_zamba.py
https://github.com/huggingface/transformers/blob/master/src/transformers/models/zamba/modeling_zamba.py
Apache-2.0
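The `logits_to_keep` argument in the row above trims the hidden states before the LM head so that, for example, generation only computes logits for the final position. A quick illustration of the slicing logic on a toy tensor:

```python
import torch

# An int keeps only the last N positions before the LM head, 0 keeps everything,
# and a tensor would be used directly as an index.
hidden_states = torch.randn(2, 10, 4)  # (batch, seq_len, hidden)

for logits_to_keep in (0, 1, 3):
    slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
    kept = hidden_states[:, slice_indices, :]
    print(logits_to_keep, kept.shape)
# 0 -> torch.Size([2, 10, 4])   (slice(0, None) keeps the full sequence)
# 1 -> torch.Size([2, 1, 4])    (only the final position, the common generation case)
# 3 -> torch.Size([2, 3, 4])
```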
def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, SequenceClassifierOutputWithPast]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.model( input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = transformer_outputs[0] logits = self.score(hidden_states) if input_ids is not None: batch_size = input_ids.shape[0] else: batch_size = inputs_embeds.shape[0] if self.config.pad_token_id is None and batch_size != 1: raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.") if self.config.pad_token_id is None: last_non_pad_token = -1 elif input_ids is not None: # To handle both left- and right- padding, we take the rightmost token that is not equal to pad_token_id non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32) token_indices = torch.arange(input_ids.shape[-1], device=logits.device, dtype=torch.int32) last_non_pad_token = (token_indices * non_pad_mask).argmax(-1) else: last_non_pad_token = -1 logger.warning_once( f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be " "unexpected if using padding tokens in conjunction with `inputs_embeds.`" ) pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token] loss = None if labels is not None: labels = labels.to(logits.device) if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) else: loss = loss_fct(pooled_logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(pooled_logits, labels) if not return_dict: output = (pooled_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutputWithPast( loss=loss, logits=pooled_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, )
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
forward
python
huggingface/transformers
src/transformers/models/zamba/modeling_zamba.py
https://github.com/huggingface/transformers/blob/master/src/transformers/models/zamba/modeling_zamba.py
Apache-2.0
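The pooling logic in the row above picks the rightmost non-pad token per sequence, which handles both left- and right-padding. A standalone demo (the pad token id of 0 is just an assumption for the toy batch):

```python
import torch

# Multiplying the position index with a non-pad mask and taking argmax returns the
# rightmost position whose token is not the pad token.
pad_token_id = 0
input_ids = torch.tensor([
    [5, 7, 9, 0, 0],   # right-padded: last real token at index 2
    [0, 0, 3, 4, 6],   # left-padded:  last real token at index 4
])

non_pad_mask = (input_ids != pad_token_id).to(torch.int32)
token_indices = torch.arange(input_ids.shape[-1], dtype=torch.int32)
last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)
print(last_non_pad_token)  # tensor([2, 4])

logits = torch.randn(2, 5, 3)  # (batch, seq_len, num_labels)
pooled_logits = logits[torch.arange(2), last_non_pad_token]
print(pooled_logits.shape)     # torch.Size([2, 3])
```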
def reorder_cache(self, beam_idx: torch.LongTensor): """Reorders the cache for beam search, given the selected beam indices.""" for layer_idx in range(len(self.key_cache)): device = self.key_cache[layer_idx].device self.key_cache[layer_idx] = self.key_cache[layer_idx].index_select(0, beam_idx.to(device)) device = self.value_cache[layer_idx].device self.value_cache[layer_idx] = self.value_cache[layer_idx].index_select(0, beam_idx.to(device)) device = self.conv_states[layer_idx].device self.conv_states[layer_idx] = self.conv_states[layer_idx].index_select(0, beam_idx.to(device)) device = self.ssm_states[layer_idx].device self.ssm_states[layer_idx] = self.ssm_states[layer_idx].index_select(0, beam_idx.to(device))
Reorders the cache for beam search, given the selected beam indices.
reorder_cache
python
huggingface/transformers
src/transformers/models/zamba2/modeling_zamba2.py
https://github.com/huggingface/transformers/blob/master/src/transformers/models/zamba2/modeling_zamba2.py
Apache-2.0
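Cache reordering for beam search is an `index_select` on the batch dimension of every cached tensor. A toy demo with assumed cache shapes (the real shapes come from the model config):

```python
import torch

# Every cached tensor is batch-first, so selecting along dim 0 with the beam indices
# re-pairs cache entries with the beams that survived this step.
beam_idx = torch.tensor([2, 0, 0, 1])            # e.g. beams 2, 0, 0, 1 were kept
key_cache_layer = torch.randn(4, 8, 10, 16)      # assumed (batch * beams, heads, seq_len, head_dim)
conv_state_layer = torch.randn(4, 32, 4)         # assumed (batch * beams, channels, kernel)

key_cache_layer = key_cache_layer.index_select(0, beam_idx)
conv_state_layer = conv_state_layer.index_select(0, beam_idx)
print(key_cache_layer.shape, conv_state_layer.shape)
# torch.Size([4, 8, 10, 16]) torch.Size([4, 32, 4])  -- same shapes, rows reordered
```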
def get_seq_length(self, layer_idx: Optional[int] = 0) -> int: """Returns the sequence length of the cached states. A layer index can be optionally passed.""" # take any layer that contains cache and not empty tensor layer_idx = self.transformer_layers[0] if layer_idx not in self.transformer_layers else layer_idx if len(self.key_cache) <= layer_idx or self.key_cache[layer_idx].numel() == 0: return 0 return self.key_cache[layer_idx].shape[-2]
Returns the sequence length of the cached states. A layer index can be optionally passed.
get_seq_length
python
huggingface/transformers
src/transformers/models/zamba2/modeling_zamba2.py
https://github.com/huggingface/transformers/blob/master/src/transformers/models/zamba2/modeling_zamba2.py
Apache-2.0
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: """ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch, num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) """ batch, num_key_value_heads, slen, head_dim = hidden_states.shape if n_rep == 1: return hidden_states hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch, num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
repeat_kv
python
huggingface/transformers
src/transformers/models/zamba2/modeling_zamba2.py
https://github.com/huggingface/transformers/blob/master/src/transformers/models/zamba2/modeling_zamba2.py
Apache-2.0
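A quick check of the equivalence claimed in the `repeat_kv` docstring, on toy grouped-query-attention shapes:

```python
import torch

# Expanding a new repeat dimension and reshaping it away produces the same tensor as
# torch.repeat_interleave along the key/value head dimension.
batch, num_key_value_heads, seqlen, head_dim, n_rep = 2, 2, 4, 8, 3
hidden_states = torch.randn(batch, num_key_value_heads, seqlen, head_dim)

expanded = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, seqlen, head_dim)
via_expand = expanded.reshape(batch, num_key_value_heads * n_rep, seqlen, head_dim)
via_repeat = torch.repeat_interleave(hidden_states, repeats=n_rep, dim=1)

print(torch.equal(via_expand, via_repeat))  # True
```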
def rotate_half(x): """Rotates half the hidden dims of the input.""" x1 = x[..., : x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2 :] return torch.cat((-x2, x1), dim=-1)
Rotates half the hidden dims of the input.
rotate_half
python
huggingface/transformers
src/transformers/models/zamba2/modeling_zamba2.py
https://github.com/huggingface/transformers/blob/master/src/transformers/models/zamba2/modeling_zamba2.py
Apache-2.0
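A standalone check of what `rotate_half` does to a small vector (the function is copied from the row above):

```python
import torch

def rotate_half(x):
    """Rotates half the hidden dims of the input (copied from the row above)."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)

# The second half of the last dimension is negated and moved to the front.
x = torch.tensor([1.0, 2.0, 3.0, 4.0])
print(rotate_half(x))  # tensor([-3., -4.,  1.,  2.])
```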
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1): """Applies Rotary Position Embedding to the query and key tensors. Args: q (`torch.Tensor`): The query tensor. k (`torch.Tensor`): The key tensor. cos (`torch.Tensor`): The cosine part of the rotary embedding. sin (`torch.Tensor`): The sine part of the rotary embedding. position_ids (`torch.Tensor`, *optional*): Deprecated and unused. unsqueeze_dim (`int`, *optional*, defaults to 1): The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. Returns: `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. """ cos = cos.unsqueeze(unsqueeze_dim) sin = sin.unsqueeze(unsqueeze_dim) q_embed = (q * cos) + (rotate_half(q) * sin) k_embed = (k * cos) + (rotate_half(k) * sin) return q_embed, k_embed
Applies Rotary Position Embedding to the query and key tensors. Args: q (`torch.Tensor`): The query tensor. k (`torch.Tensor`): The key tensor. cos (`torch.Tensor`): The cosine part of the rotary embedding. sin (`torch.Tensor`): The sine part of the rotary embedding. position_ids (`torch.Tensor`, *optional*): Deprecated and unused. unsqueeze_dim (`int`, *optional*, defaults to 1): The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. Returns: `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
apply_rotary_pos_emb
python
huggingface/transformers
src/transformers/models/zamba2/modeling_zamba2.py
https://github.com/huggingface/transformers/blob/master/src/transformers/models/zamba2/modeling_zamba2.py
Apache-2.0
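A shape walk-through for `apply_rotary_pos_emb` with the default `unsqueeze_dim=1`. Both helpers are copied from the rows above; the cos/sin construction is a generic RoPE recipe with an assumed base of 10000, illustrative only and not necessarily the model's own rotary embedding:

```python
import torch

def rotate_half(x):
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)

def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed

# q/k are (batch, heads, seq_len, head_dim); cos/sin are (batch, seq_len, head_dim), so the
# default unsqueeze_dim=1 makes them broadcast over the head dimension.
batch, heads, seq_len, head_dim = 2, 4, 6, 8
q = torch.randn(batch, heads, seq_len, head_dim)
k = torch.randn(batch, heads, seq_len, head_dim)

# Generic RoPE cos/sin construction with an assumed base of 10000 (illustrative only).
inv_freq = 1.0 / (10000.0 ** (torch.arange(0, head_dim, 2, dtype=torch.float32) / head_dim))
freqs = torch.outer(torch.arange(seq_len, dtype=torch.float32), inv_freq)  # (seq_len, head_dim // 2)
emb = torch.cat((freqs, freqs), dim=-1)                                    # (seq_len, head_dim)
cos = emb.cos().expand(batch, -1, -1)
sin = emb.sin().expand(batch, -1, -1)

q_embed, k_embed = apply_rotary_pos_emb(q, k, cos, sin)
print(q_embed.shape, k_embed.shape)  # torch.Size([2, 4, 6, 8]) torch.Size([2, 4, 6, 8])
```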
def pad_tensor_by_size(input_tensor: torch.Tensor, pad_size: int): """ Pads `input_tensor` with `pad_size` on the seq_len dim (dim=1). Assumes the input tensor has rank 4 or 3. """ pad_shape = (0, 0, 0, 0, 0, pad_size, 0, 0) if len(input_tensor.shape) == 4 else (0, 0, 0, pad_size, 0, 0) return torch.nn.functional.pad(input_tensor, pad_shape, mode="constant", value=0)
Pads `input_tensor` with `pad_size` on the seq_len dim (dim=1). Assumes the input tensor has rank 4 or 3.
pad_tensor_by_size
python
huggingface/transformers
src/transformers/models/zamba2/modeling_zamba2.py
https://github.com/huggingface/transformers/blob/master/src/transformers/models/zamba2/modeling_zamba2.py
Apache-2.0
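What `pad_tensor_by_size` does for the two supported ranks, on toy shapes (the function is copied from the row above):

```python
import torch

def pad_tensor_by_size(input_tensor: torch.Tensor, pad_size: int):
    # Only the seq_len dimension (dim=1) grows by pad_size; all other dims are untouched.
    pad_shape = (0, 0, 0, 0, 0, pad_size, 0, 0) if len(input_tensor.shape) == 4 else (0, 0, 0, pad_size, 0, 0)
    return torch.nn.functional.pad(input_tensor, pad_shape, mode="constant", value=0)

x3 = torch.randn(2, 5, 4)        # (batch, seq_len, num_heads)
x4 = torch.randn(2, 5, 4, 3)     # (batch, seq_len, num_heads, head_dim or state_size)
print(pad_tensor_by_size(x3, 3).shape)  # torch.Size([2, 8, 4])
print(pad_tensor_by_size(x4, 3).shape)  # torch.Size([2, 8, 4, 3])
```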
def reshape_into_chunks(input_tensor, pad_size, chunk_size): """ Pads `input_tensor` with `pad_size` on the seq_len dim (dim=1) and simultaneously splits it into chunked sequences. Assumes the input tensor has rank 4 or 3. """ # [bsz, seq_len, ...] -> [bsz, seq_len multiple of chunk_size, ...] input_tensor = pad_tensor_by_size(input_tensor, pad_size) if len(input_tensor.shape) == 3: # [bsz, seq_len multiple of chunk_size, num_heads] -> [bsz, -1, chunk_size, num_heads] return input_tensor.reshape(input_tensor.shape[0], -1, chunk_size, input_tensor.shape[2]) else: # [bsz, seq_len multiple of chunk_size, num_heads, head_dim or state_size] -> [bsz, -1, chunk_size, num_heads, head_dim or state_size] return input_tensor.reshape( input_tensor.shape[0], -1, chunk_size, input_tensor.shape[2], input_tensor.shape[3] )
Pads `input_tensor` with `pad_size` on the seq_len dim (dim=1) and simultaneously splits it into chunked sequences. Assumes the input tensor has rank 4 or 3.
reshape_into_chunks
python
huggingface/transformers
src/transformers/models/zamba2/modeling_zamba2.py
https://github.com/huggingface/transformers/blob/master/src/transformers/models/zamba2/modeling_zamba2.py
Apache-2.0
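A toy run of `reshape_into_chunks` with `seq_len=10` and `chunk_size=4`, with the padding helper folded in so the snippet stays self-contained:

```python
import torch
import torch.nn.functional as F

def reshape_into_chunks(input_tensor, pad_size, chunk_size):
    # Condensed copy of the two helpers above: pad the seq_len dim, then split it into chunks.
    pad_shape = (0, 0, 0, 0, 0, pad_size, 0, 0) if len(input_tensor.shape) == 4 else (0, 0, 0, pad_size, 0, 0)
    input_tensor = F.pad(input_tensor, pad_shape, mode="constant", value=0)
    if len(input_tensor.shape) == 3:
        return input_tensor.reshape(input_tensor.shape[0], -1, chunk_size, input_tensor.shape[2])
    return input_tensor.reshape(
        input_tensor.shape[0], -1, chunk_size, input_tensor.shape[2], input_tensor.shape[3]
    )

seq_len, chunk_size = 10, 4
pad_size = (chunk_size - seq_len % chunk_size) % chunk_size   # 2
x = torch.randn(2, seq_len, 8)                                # (batch, seq_len, num_heads)
print(reshape_into_chunks(x, pad_size, chunk_size).shape)     # torch.Size([2, 3, 4, 8])
```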
def segment_sum(input_tensor): """ More stable segment sum calculation. Uses cumulative sums and masking instead of direct subtractions. """ chunk_size = input_tensor.size(-1) # 1. expand input tensor to have an additional dimension and repeat along that dimension # [..., chunk_size] -> [..., chunk_size, chunk_size] input_tensor = input_tensor[..., None].expand(*input_tensor.size(), chunk_size) # 2. create a lower triangular mask with the diagonal set to 0 to 0 out elements above diag mask = torch.tril(torch.ones(chunk_size, chunk_size, device=input_tensor.device, dtype=torch.bool), diagonal=-1) input_tensor = input_tensor.masked_fill(~mask, 0) # 3. compute actual cumsum tensor_segsum = torch.cumsum(input_tensor, dim=-2) # 4. apply mask to keep only the lower triangular part of the cumulative sum result (incl diagonal this time) mask = torch.tril(torch.ones(chunk_size, chunk_size, device=input_tensor.device, dtype=torch.bool), diagonal=0) tensor_segsum = tensor_segsum.masked_fill(~mask, -torch.inf) return tensor_segsum
More stable segment sum calculation. Uses cumulative sums and masking instead of direct subtractions.
segment_sum
python
huggingface/transformers
src/transformers/models/zamba2/modeling_zamba2.py
https://github.com/huggingface/transformers/blob/master/src/transformers/models/zamba2/modeling_zamba2.py
Apache-2.0
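A standalone check of `segment_sum` on a length-4 input: entry `[i, j]` (for `j <= i`) is the sum of `input[j + 1 .. i]`, the diagonal is 0, and entries above the diagonal are `-inf` (which typically vanish once the result is exponentiated in the chunked SSM computation):

```python
import torch

def segment_sum(input_tensor):
    """Copy of the function above (device handling dropped for a CPU-only check)."""
    chunk_size = input_tensor.size(-1)
    input_tensor = input_tensor[..., None].expand(*input_tensor.size(), chunk_size)
    mask = torch.tril(torch.ones(chunk_size, chunk_size, dtype=torch.bool), diagonal=-1)
    input_tensor = input_tensor.masked_fill(~mask, 0)
    tensor_segsum = torch.cumsum(input_tensor, dim=-2)
    mask = torch.tril(torch.ones(chunk_size, chunk_size, dtype=torch.bool), diagonal=0)
    return tensor_segsum.masked_fill(~mask, -torch.inf)

x = torch.tensor([1.0, 2.0, 3.0, 4.0])
print(segment_sum(x))
# Rows: [0, -inf, -inf, -inf], [2, 0, -inf, -inf], [5, 3, 0, -inf], [9, 7, 4, 0]
# e.g. entry [3, 0] = 2 + 3 + 4 = 9, the sum of input[1..3].
```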
def __init__(self, config: Zamba2Config, num_fwd_mem_blocks=None, block_id: Optional[int] = None): """ This MLP layer contributes to tied transformer blocks aimed at increasing compute without increasing model size. Because this layer is tied, un-tied adapter modules (formally the same as LoRA, but used in the base model) are added to the up and gate projectors to increase expressivity with a small memory overhead. """ super().__init__() self.config = config self.hidden_size = config.hidden_size self.intermediate_size = config.intermediate_size self.num_fwd_mem_blocks = num_fwd_mem_blocks self.block_id = block_id self.gate_up_proj = nn.Linear(self.hidden_size, 2 * self.intermediate_size, bias=config.add_bias_linear) self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.add_bias_linear) self.act_fn = ACT2FN[config.hidden_act] self.gate_up_proj_adapter_list = nn.ModuleList([]) for i in range(self.num_fwd_mem_blocks): if i % config.num_mem_blocks == block_id: gate_up_proj_adapter = nn.Sequential( nn.Linear(self.config.hidden_size, self.config.adapter_rank, bias=False), nn.Linear(self.config.adapter_rank, 2 * self.intermediate_size, bias=False), ) else: gate_up_proj_adapter = nn.Identity() self.gate_up_proj_adapter_list.append(gate_up_proj_adapter) layer_block_map = config.hybrid_layer_ids self.layer_dic = {value: index for index, value in enumerate(layer_block_map)}
This MLP layer contributes to tied transformer blocks aimed at increasing compute without increasing model size. Because this layer is tied, un-tied adapter modules (formally the same as LoRA, but used in the base model) are added to the up and gate projectors to increase expressivity with a small memory overhead.
__init__
python
huggingface/transformers
src/transformers/models/zamba2/modeling_zamba2.py
https://github.com/huggingface/transformers/blob/master/src/transformers/models/zamba2/modeling_zamba2.py
Apache-2.0
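The `__init__` above only builds the tied `gate_up_proj` and the per-block low-rank adapters; the forward pass is not part of this row. The snippet below is therefore just a plausible sketch, with illustrative names and sizes, of how such a LoRA-style adapter adds a low-rank delta to the shared projection:

```python
import torch
import torch.nn as nn

# Illustrative sizes; this is a sketch of the tied-projection-plus-adapter idea, not the
# actual Zamba2MLP forward.
hidden_size, intermediate_size, adapter_rank = 16, 32, 4
x = torch.randn(2, 5, hidden_size)

gate_up_proj = nn.Linear(hidden_size, 2 * intermediate_size, bias=False)  # tied across blocks
gate_up_proj_adapter = nn.Sequential(                                     # un-tied, per block
    nn.Linear(hidden_size, adapter_rank, bias=False),
    nn.Linear(adapter_rank, 2 * intermediate_size, bias=False),
)

gate_up = gate_up_proj(x) + gate_up_proj_adapter(x)   # shared projection + low-rank adapter delta
gate, up = gate_up.chunk(2, dim=-1)
print(gate.shape, up.shape)  # torch.Size([2, 5, 32]) torch.Size([2, 5, 32])
```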
def forward( self, hidden_states: torch.Tensor, original_hidden_states: torch.Tensor, layer_idx: int, attention_mask: Optional[torch.Tensor] = None, past_key_value: Optional[Zamba2HybridDynamicCache] = None, output_attentions: Optional[bool] = False, position_embeddings: Optional[torch.LongTensor] = None, **kwargs: Unpack[FlashAttentionKwargs], ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: """ Args: hidden_states (`torch.FloatTensor`): output of previous Mamba layer of shape `(batch, seq_len, embed_dim)` original_hidden_states (`torch.FloatTensor`): word embedding output of shape `(batch, seq_len, embed_dim)`. This is concatenated with `hidden_states` (which is the output of the previous (mamba) layer). The concatenated tensor is then used as input of the pre-attention RMSNorm (see fig. 2 in https://arxiv.org/pdf/2405.16712). attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch, sequence_length)` where padding elements are indicated by 0. past_key_value (`Zamba2HybridDynamicCache`, *optional*): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). position_embeddings (`Tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*): Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`, with `head_dim` being the embedding dimension of each attention head. """ hidden_states = torch.concatenate([hidden_states, original_hidden_states], dim=-1) hidden_states = self.input_layernorm(hidden_states) hidden_states, self_attn_weights = self.self_attn( hidden_states=hidden_states, layer_idx=layer_idx, attention_mask=attention_mask, past_key_value=past_key_value, output_attentions=output_attentions, position_embeddings=position_embeddings, **kwargs, ) hidden_states = self.pre_ff_layernorm(hidden_states) hidden_states = self.feed_forward(hidden_states, layer_idx) outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) return outputs
Args: hidden_states (`torch.FloatTensor`): output of previous Mamba layer of shape `(batch, seq_len, embed_dim)` original_hidden_states (`torch.FloatTensor`): word embedding output of shape `(batch, seq_len, embed_dim)`. This is concatenated with `hidden_states` (which is the output of the previous (mamba) layer). The concatenated tensor is then used as input of the pre-attention RMSNorm (see fig. 2 in https://arxiv.org/pdf/2405.16712). attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch, sequence_length)` where padding elements are indicated by 0. past_key_value (`Zamba2HybridDynamicCache`, *optional*): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). position_embeddings (`Tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*): Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`, with `head_dim` being the embedding dimension of each attention head.
forward
python
huggingface/transformers
src/transformers/models/zamba2/modeling_zamba2.py
https://github.com/huggingface/transformers/blob/master/src/transformers/models/zamba2/modeling_zamba2.py
Apache-2.0
def forward( self, hidden_states: torch.Tensor, original_hidden_states: Optional[torch.Tensor] = None, layer_idx: Optional[int] = None, attention_mask: Optional[torch.Tensor] = None, causal_mask: Optional[torch.Tensor] = None, past_key_value: Optional[Zamba2HybridDynamicCache] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, cache_position: Optional[torch.LongTensor] = None, transformer_hidden_states: Optional[torch.Tensor] = None, **kwargs, ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch, sequence_length)` where padding elements are indicated by 0. past_key_value (`Zamba2HybridDynamicCache`, *optional*): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence. """ residual = hidden_states # `transformer_hidden_states` is the output from shared transformer + linear layer (see fig. 2 in https://arxiv.org/pdf/2405.16712). # `transformer_hidden_states` is then added to the input to the mamba layer below (as described in eq. (6) of https://arxiv.org/pdf/2405.16712). hidden_states = ( hidden_states + transformer_hidden_states if transformer_hidden_states is not None else hidden_states ) hidden_states = self.input_layernorm(hidden_states) hidden_states = self.mamba( hidden_states=hidden_states, cache_params=past_key_value, attention_mask=attention_mask, ) self_attn_weights = None # residual connection after mamba hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) if use_cache: outputs += (past_key_value,) return outputs
Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch, sequence_length)` where padding elements are indicated by 0. past_key_value (`Zamba2HybridDynamicCache`, *optional*): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence.
forward
python
huggingface/transformers
src/transformers/models/zamba2/modeling_zamba2.py
https://github.com/huggingface/transformers/blob/master/src/transformers/models/zamba2/modeling_zamba2.py
Apache-2.0
def forward( self, hidden_states: torch.Tensor, original_hidden_states: Optional[torch.Tensor] = None, layer_idx: Optional[int] = None, attention_mask: Optional[torch.Tensor] = None, causal_mask: Optional[torch.Tensor] = None, past_key_value: Optional[Zamba2HybridDynamicCache] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, position_embeddings: Optional[torch.LongTensor] = None, ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` original_hidden_states (`torch.FloatTensor`): word embedding output that will be concatenated with hidden activations to form the input of the shared transformer layer. layer_idx (`int`): layer number. attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch, sequence_length)` where padding elements are indicated by 0. past_key_value (`Zamba2HybridDynamicCache`, *optional*): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). position_embeddings (`Tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*): Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`, with `head_dim` being the embedding dimension of each attention head. """ layer_outputs = self.shared_transformer( hidden_states, original_hidden_states=original_hidden_states, layer_idx=layer_idx, attention_mask=causal_mask, past_key_value=past_key_value, output_attentions=output_attentions, position_embeddings=position_embeddings, ) transformer_hidden_states = layer_outputs[0] if output_attentions: self_attn_weights = layer_outputs[1] transformer_hidden_states = self.linear(transformer_hidden_states) layer_outputs = self.mamba_decoder( hidden_states, transformer_hidden_states=transformer_hidden_states, attention_mask=attention_mask, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, position_embeddings=position_embeddings, ) if output_attentions: layer_outputs = (layer_outputs[0], self_attn_weights) + layer_outputs[2:] return layer_outputs
Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` original_hidden_states (`torch.FloatTensor`): word embedding output that will be concatenated with hidden activations to form the input of the shared transformer layer. layer_idx (`int`): layer number. attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch, sequence_length)` where padding elements are indicated by 0. past_key_value (`Zamba2HybridDynamicCache`, *optional*): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). position_embeddings (`Tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*): Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`, with `head_dim` being the embedding dimension of each attention head.
forward
python
huggingface/transformers
src/transformers/models/zamba2/modeling_zamba2.py
https://github.com/huggingface/transformers/blob/master/src/transformers/models/zamba2/modeling_zamba2.py
Apache-2.0
def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Zamba2HybridDynamicCache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, logits_to_keep: Union[int, torch.Tensor] = 0, **loss_kwargs, ) -> Union[Tuple, CausalLMOutputWithPast]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example: ```python >>> from transformers import AutoTokenizer, Zamba2ForCausalLM >>> model = Zamba2ForCausalLM.from_pretrained("Zyphra/Zamba2-7B-v1") >>> tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-7B-v1") >>> prompt = "Hey, are you conscious? Can you talk to me?" >>> inputs = tokenizer(prompt, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(inputs.input_ids, max_length=30) >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you." ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, cache_position=cache_position, return_dict=return_dict, ) hidden_states = outputs[0] # Only compute necessary logits, and do not upcast them to float if we are not computing the loss slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep logits = self.lm_head(hidden_states[:, slice_indices, :]) loss = None if labels is not None: loss = self.loss_function(logits, labels, self.vocab_size, **loss_kwargs) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example: ```python >>> from transformers import AutoTokenizer, Zamba2ForCausalLM >>> model = Zamba2ForCausalLM.from_pretrained("Zyphra/Zamba2-7B-v1") >>> tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-7B-v1") >>> prompt = "Hey, are you conscious? Can you talk to me?" >>> inputs = tokenizer(prompt, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(inputs.input_ids, max_length=30) >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you." ```
forward
python
huggingface/transformers
src/transformers/models/zamba2/modeling_zamba2.py
https://github.com/huggingface/transformers/blob/master/src/transformers/models/zamba2/modeling_zamba2.py
Apache-2.0
def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, SequenceClassifierOutputWithPast]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.model( input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = transformer_outputs[0] logits = self.score(hidden_states) if input_ids is not None: batch_size = input_ids.shape[0] else: batch_size = inputs_embeds.shape[0] if self.config.pad_token_id is None and batch_size != 1: raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.") if self.config.pad_token_id is None: last_non_pad_token = -1 elif input_ids is not None: # To handle both left- and right- padding, we take the rightmost token that is not equal to pad_token_id non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32) token_indices = torch.arange(input_ids.shape[-1], device=logits.device, dtype=torch.int32) last_non_pad_token = (token_indices * non_pad_mask).argmax(-1) else: last_non_pad_token = -1 logger.warning_once( f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be " "unexpected if using padding tokens in conjunction with `inputs_embeds.`" ) pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token] loss = None if labels is not None: labels = labels.to(logits.device) if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) else: loss = loss_fct(pooled_logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(pooled_logits, labels) if not return_dict: output = (pooled_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutputWithPast( loss=loss, logits=pooled_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, )
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
forward
python
huggingface/transformers
src/transformers/models/zamba2/modeling_zamba2.py
https://github.com/huggingface/transformers/blob/master/src/transformers/models/zamba2/modeling_zamba2.py
Apache-2.0
def get_seq_length(self, layer_idx: Optional[int] = 0) -> int: """Returns the sequence length of the cached states. A layer index can be optionally passed.""" # take any layer that contains cache and not empty tensor layer_idx = self.transformer_layers[0] if layer_idx not in self.transformer_layers else layer_idx if len(self.key_cache) <= layer_idx or self.key_cache[layer_idx].numel() == 0: return 0 return self.key_cache[layer_idx].shape[-2]
Returns the sequence length of the cached states. A layer index can be optionally passed.
get_seq_length
python
huggingface/transformers
src/transformers/models/zamba2/modular_zamba2.py
https://github.com/huggingface/transformers/blob/master/src/transformers/models/zamba2/modular_zamba2.py
Apache-2.0
def __init__(self, config: Zamba2Config, num_fwd_mem_blocks=None, block_id: Optional[int] = None): """ This MLP layer contributes to tied transformer blocks aimed at increasing compute without increasing model size. Because this layer is tied, un-tied adapter modules (formally the same as LoRA, but used in the base model) are added to the up and gate projectors to increase expressivity with a small memory overhead. """ super().__init__() self.config = config self.hidden_size = config.hidden_size self.intermediate_size = config.intermediate_size self.num_fwd_mem_blocks = num_fwd_mem_blocks self.block_id = block_id self.gate_up_proj = nn.Linear(self.hidden_size, 2 * self.intermediate_size, bias=config.add_bias_linear) self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.add_bias_linear) self.act_fn = ACT2FN[config.hidden_act] self.gate_up_proj_adapter_list = nn.ModuleList([]) for i in range(self.num_fwd_mem_blocks): if i % config.num_mem_blocks == block_id: gate_up_proj_adapter = nn.Sequential( nn.Linear(self.config.hidden_size, self.config.adapter_rank, bias=False), nn.Linear(self.config.adapter_rank, 2 * self.intermediate_size, bias=False), ) else: gate_up_proj_adapter = nn.Identity() self.gate_up_proj_adapter_list.append(gate_up_proj_adapter) layer_block_map = config.hybrid_layer_ids self.layer_dic = {value: index for index, value in enumerate(layer_block_map)}
This MLP layer contributes to tied transformer blocks aimed at increasing compute without increasing model size. Because this layer is tied, un-tied adapter modules (formally the same as LoRA, but used in the base model) are added to the up and gate projectors to increase expressivity with a small memory overhead.
__init__
python
huggingface/transformers
src/transformers/models/zamba2/modular_zamba2.py
https://github.com/huggingface/transformers/blob/master/src/transformers/models/zamba2/modular_zamba2.py
Apache-2.0
def forward( self, hidden_states: torch.Tensor, original_hidden_states: torch.Tensor, layer_idx: int, attention_mask: Optional[torch.Tensor] = None, past_key_value: Optional[Zamba2HybridDynamicCache] = None, output_attentions: Optional[bool] = False, position_embeddings: Optional[torch.LongTensor] = None, **kwargs: Unpack[FlashAttentionKwargs], ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: """ Args: hidden_states (`torch.FloatTensor`): output of previous Mamba layer of shape `(batch, seq_len, embed_dim)` original_hidden_states (`torch.FloatTensor`): word embedding output of shape `(batch, seq_len, embed_dim)`. This is concatenated with `hidden_states` (which is the output of the previous (mamba) layer). The concatenated tensor is then used as input of the pre-attention RMSNorm (see fig. 2 in https://arxiv.org/pdf/2405.16712). attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch, sequence_length)` where padding elements are indicated by 0. past_key_value (`Zamba2HybridDynamicCache`, *optional*): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). position_embeddings (`Tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*): Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`, with `head_dim` being the embedding dimension of each attention head. """ hidden_states = torch.concatenate([hidden_states, original_hidden_states], dim=-1) hidden_states = self.input_layernorm(hidden_states) hidden_states, self_attn_weights = self.self_attn( hidden_states=hidden_states, layer_idx=layer_idx, attention_mask=attention_mask, past_key_value=past_key_value, output_attentions=output_attentions, position_embeddings=position_embeddings, **kwargs, ) hidden_states = self.pre_ff_layernorm(hidden_states) hidden_states = self.feed_forward(hidden_states, layer_idx) outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) return outputs
Args: hidden_states (`torch.FloatTensor`): output of previous Mamba layer of shape `(batch, seq_len, embed_dim)` original_hidden_states (`torch.FloatTensor`): word embedding output of shape `(batch, seq_len, embed_dim)`. This is concatenated with `hidden_states` (which is the output of the previous (mamba) layer). The concatenated tensor is then used as input of the pre-attention RMSNorm (see fig. 2 in https://arxiv.org/pdf/2405.16712). attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch, sequence_length)` where padding elements are indicated by 0. past_key_value (`Zamba2HybridDynamicCache`, *optional*): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). position_embeddings (`Tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*): Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`, with `head_dim` being the embedding dimension of each attention head.
forward
python
huggingface/transformers
src/transformers/models/zamba2/modular_zamba2.py
https://github.com/huggingface/transformers/blob/master/src/transformers/models/zamba2/modular_zamba2.py
Apache-2.0
def forward( self, hidden_states: torch.Tensor, original_hidden_states: Optional[torch.Tensor] = None, layer_idx: Optional[int] = None, attention_mask: Optional[torch.Tensor] = None, causal_mask: Optional[torch.Tensor] = None, past_key_value: Optional[Zamba2HybridDynamicCache] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, position_embeddings: Optional[torch.LongTensor] = None, ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` original_hidden_states (`torch.FloatTensor`): word embedding output that will be concatenated with hidden activations to form the input of the shared transformer layer. layer_idx (`int`): layer number. attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch, sequence_length)` where padding elements are indicated by 0. past_key_value (`Zamba2HybridDynamicCache`, *optional*): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). position_embeddings (`Tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*): Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`, with `head_dim` being the embedding dimension of each attention head. """ layer_outputs = self.shared_transformer( hidden_states, original_hidden_states=original_hidden_states, layer_idx=layer_idx, attention_mask=causal_mask, past_key_value=past_key_value, output_attentions=output_attentions, position_embeddings=position_embeddings, ) transformer_hidden_states = layer_outputs[0] if output_attentions: self_attn_weights = layer_outputs[1] transformer_hidden_states = self.linear(transformer_hidden_states) layer_outputs = self.mamba_decoder( hidden_states, transformer_hidden_states=transformer_hidden_states, attention_mask=attention_mask, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, position_embeddings=position_embeddings, ) if output_attentions: layer_outputs = (layer_outputs[0], self_attn_weights) + layer_outputs[2:] return layer_outputs
Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` original_hidden_states (`torch.FloatTensor`): word embedding output that will be concatenated with hidden activations to form the input of the shared transformer layer. layer_idx (`int`): layer number. attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch, sequence_length)` where padding elements are indicated by 0. past_key_value (`Zamba2HybridDynamicCache`, *optional*): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). position_embeddings (`Tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*): Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`, with `head_dim` being the embedding dimension of each attention head.
forward
python
huggingface/transformers
src/transformers/models/zamba2/modular_zamba2.py
https://github.com/huggingface/transformers/blob/master/src/transformers/models/zamba2/modular_zamba2.py
Apache-2.0
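The hybrid block above is easier to follow as a data-flow sketch: the shared transformer consumes the concatenation of the running hidden states and the original word embeddings, its output is passed through a linear projection, and that projection is handed to the Mamba decoder as `transformer_hidden_states`. The toy modules below are stand-ins for the real Zamba2 classes, and the way the Mamba path combines the two streams (a simple addition here) is an assumption made only for illustration.

```python
# Minimal sketch of the Zamba2 hybrid-layer wiring described above.
# All modules are illustrative placeholders, not the actual transformers classes.
import torch
import torch.nn as nn


class ToyHybridLayer(nn.Module):
    def __init__(self, hidden_size: int):
        super().__init__()
        self.shared_transformer = nn.Linear(2 * hidden_size, hidden_size)  # stands in for the shared attention block
        self.linear = nn.Linear(hidden_size, hidden_size)                  # projection between the two sub-blocks
        self.mamba_decoder = nn.Linear(hidden_size, hidden_size)           # stands in for the Mamba decoder

    def forward(self, hidden_states, original_hidden_states):
        # the shared block sees [hidden_states ; original_hidden_states] along the feature dimension
        attn_out = self.shared_transformer(torch.cat([hidden_states, original_hidden_states], dim=-1))
        transformer_hidden_states = self.linear(attn_out)
        # assumption: the decoder simply adds the projected attention output to its input stream
        return self.mamba_decoder(hidden_states + transformer_hidden_states)


x = torch.randn(1, 4, 8)
print(ToyHybridLayer(8)(x, x).shape)  # torch.Size([1, 4, 8])
```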
def convert_zoedepth_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub): """ Copy/paste/tweak model's weights to our ZoeDepth structure. """ # define ZoeDepth configuration based on URL config, _ = get_zoedepth_config(model_name) # load original model original_model = torch.hub.load( "NielsRogge/ZoeDepth:understanding_zoedepth", model_name, pretrained=True, force_reload=True ) original_model.eval() state_dict = original_model.state_dict() print("Original state dict:") for name, param in state_dict.items(): print(name, param.shape) # read in qkv matrices read_in_q_k_v(state_dict, config) if model_name == "ZoeD_NK": read_in_q_k_v_metric_head(state_dict) # rename keys state_dict = convert_state_dict(state_dict) # remove certain keys remove_ignore_keys(state_dict) # load HuggingFace model model = ZoeDepthForDepthEstimation(config) model.load_state_dict(state_dict) model.eval() # verify image processor image = prepare_img() image_processor = ZoeDepthImageProcessor() pixel_values = image_processor(image, return_tensors="pt").pixel_values filepath = hf_hub_download( repo_id="nielsr/test-image", filename="zoedepth_pixel_values.pt", repo_type="dataset", ) original_pixel_values = torch.load(filepath, map_location="cpu", weights_only=True) assert torch.allclose(pixel_values, original_pixel_values) # verify logits # this was done on a resized version of the cats image (384x384) filepath = hf_hub_download( repo_id="nielsr/test-image", filename="zoedepth_pixel_values.pt", repo_type="dataset", revision="1865dbb81984f01c89e83eec10f8d07efd10743d", ) cats_pixel_values = torch.load(filepath, map_location="cpu", weights_only=True) depth = model(cats_pixel_values).predicted_depth # Verify logits # These were obtained by inserting the pixel_values at the patch embeddings of BEiT if model_name == "ZoeD_N": expected_shape = torch.Size([1, 384, 384]) expected_slice = torch.tensor([[1.0328, 1.0604, 1.0747], [1.0816, 1.1293, 1.1456], [1.1117, 1.1629, 1.1766]]) elif model_name == "ZoeD_K": expected_shape = torch.Size([1, 384, 384]) expected_slice = torch.tensor([[1.6567, 1.6852, 1.7065], [1.6707, 1.6764, 1.6713], [1.7195, 1.7166, 1.7118]]) elif model_name == "ZoeD_NK": expected_shape = torch.Size([1, 384, 384]) expected_slice = torch.tensor([[1.1228, 1.1079, 1.1382], [1.1807, 1.1658, 1.1891], [1.2344, 1.2094, 1.2317]]) print("Shape of depth:", depth.shape) print("First 3x3 slice of depth:", depth[0, :3, :3]) assert depth.shape == torch.Size(expected_shape) assert torch.allclose(depth[0, :3, :3], expected_slice, atol=1e-4) print("Looks ok!") if pytorch_dump_folder_path is not None: print(f"Saving model and processor to {pytorch_dump_folder_path}") Path(pytorch_dump_folder_path).mkdir(exist_ok=True) model.save_pretrained(pytorch_dump_folder_path) image_processor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: model_name_to_repo_id = { "ZoeD_N": "zoedepth-nyu", "ZoeD_K": "zoedepth-kitti", "ZoeD_NK": "zoedepth-nyu-kitti", } print("Pushing model and processor to the hub...") repo_id = model_name_to_repo_id[model_name] model.push_to_hub(f"Intel/{repo_id}") image_processor = ZoeDepthImageProcessor() image_processor.push_to_hub(f"Intel/{repo_id}")
Copy/paste/tweak model's weights to our ZoeDepth structure.
convert_zoedepth_checkpoint
python
huggingface/transformers
src/transformers/models/zoedepth/convert_zoedepth_to_hf.py
https://github.com/huggingface/transformers/blob/master/src/transformers/models/zoedepth/convert_zoedepth_to_hf.py
Apache-2.0
def resize( self, image: np.ndarray, size: Dict[str, int], keep_aspect_ratio: bool = False, ensure_multiple_of: int = 1, resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.ndarray: """ Resize an image to target size `(size["height"], size["width"])`. If `keep_aspect_ratio` is `True`, the image is resized to the largest possible size such that the aspect ratio is preserved. If `ensure_multiple_of` is set, the image is resized to a size that is a multiple of this value. Args: image (`np.ndarray`): Image to resize. size (`Dict[str, int]`): Target size of the output image. keep_aspect_ratio (`bool`, *optional*, defaults to `False`): If `True`, the image is resized to the largest possible size such that the aspect ratio is preserved. ensure_multiple_of (`int`, *optional*, defaults to 1): The image is resized to a size that is a multiple of this value. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`): Defines the resampling filter to use if resizing the image. Otherwise, the image is resized to size specified in `size`. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. """ if input_data_format is None: input_data_format = infer_channel_dimension_format(image) data_format = data_format if data_format is not None else input_data_format size = get_size_dict(size) if "height" not in size or "width" not in size: raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}") output_size = get_resize_output_image_size( image, output_size=(size["height"], size["width"]), keep_aspect_ratio=keep_aspect_ratio, multiple=ensure_multiple_of, input_data_format=input_data_format, ) height, width = output_size torch_image = torch.from_numpy(image).unsqueeze(0) torch_image = torch_image.permute(0, 3, 1, 2) if input_data_format == "channels_last" else torch_image # TODO support align_corners=True in image_transforms.resize requires_backends(self, "torch") resample_to_mode = {PILImageResampling.BILINEAR: "bilinear", PILImageResampling.BICUBIC: "bicubic"} mode = resample_to_mode[resample] resized_image = nn.functional.interpolate( torch_image, (int(height), int(width)), mode=mode, align_corners=True ) resized_image = resized_image.squeeze().numpy() resized_image = to_channel_dimension_format( resized_image, data_format, input_channel_dim=ChannelDimension.FIRST ) return resized_image
Resize an image to target size `(size["height"], size["width"])`. If `keep_aspect_ratio` is `True`, the image is resized to the largest possible size such that the aspect ratio is preserved. If `ensure_multiple_of` is set, the image is resized to a size that is a multiple of this value. Args: image (`np.ndarray`): Image to resize. size (`Dict[str, int]`): Target size of the output image. keep_aspect_ratio (`bool`, *optional*, defaults to `False`): If `True`, the image is resized to the largest possible size such that the aspect ratio is preserved. ensure_multiple_of (`int`, *optional*, defaults to 1): The image is resized to a size that is a multiple of this value. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`): Defines the resampling filter to use if resizing the image. Otherwise, the image is resized to size specified in `size`. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred.
resize
python
huggingface/transformers
src/transformers/models/zoedepth/image_processing_zoedepth.py
https://github.com/huggingface/transformers/blob/master/src/transformers/models/zoedepth/image_processing_zoedepth.py
Apache-2.0
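Because `image_transforms.resize` does not yet support `align_corners=True` (see the TODO above), the resize goes through `nn.functional.interpolate` on a channels-first tensor. A minimal, self-contained sketch of that path, assuming a channels-last float numpy image and an illustrative target size:

```python
# Sketch of the interpolate-based resize used above; the image content and the
# 384x512 target size are illustrative.
import numpy as np
import torch
import torch.nn.functional as F

image = np.random.rand(480, 640, 3).astype(np.float32)          # (H, W, C), channels_last
tensor = torch.from_numpy(image).permute(2, 0, 1).unsqueeze(0)  # (1, C, H, W)

resized = F.interpolate(tensor, size=(384, 512), mode="bilinear", align_corners=True)
resized = resized.squeeze(0).numpy()                             # back to a (C, H, W) numpy array
print(resized.shape)  # (3, 384, 512)
```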
def pad_image( self, image: np.array, mode: PaddingMode = PaddingMode.REFLECT, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ): """ Pad an image as done in the original ZoeDepth implementation. Padding fixes the boundary artifacts in the output depth map. Boundary artifacts are sometimes caused by the fact that the model is trained on NYU raw dataset which has a black or white border around the image. This function pads the input image and crops the prediction back to the original size / view. Args: image (`np.ndarray`): Image to pad. mode (`PaddingMode`): The padding mode to use. Can be one of: - `"constant"`: pads with a constant value. - `"reflect"`: pads with the reflection of the vector mirrored on the first and last values of the vector along each axis. - `"replicate"`: pads with the replication of the last value on the edge of the array along each axis. - `"symmetric"`: pads with the reflection of the vector mirrored along the edge of the array. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ height, width = get_image_size(image, input_data_format) pad_height = int(np.sqrt(height / 2) * 3) pad_width = int(np.sqrt(width / 2) * 3) return pad( image, padding=((pad_height, pad_height), (pad_width, pad_width)), mode=mode, data_format=data_format, input_data_format=input_data_format, )
Pad an image as done in the original ZoeDepth implementation. Padding fixes the boundary artifacts in the output depth map. Boundary artifacts are sometimes caused by the fact that the model is trained on NYU raw dataset which has a black or white border around the image. This function pads the input image and crops the prediction back to the original size / view. Args: image (`np.ndarray`): Image to pad. mode (`PaddingMode`): The padding mode to use. Can be one of: - `"constant"`: pads with a constant value. - `"reflect"`: pads with the reflection of the vector mirrored on the first and last values of the vector along each axis. - `"replicate"`: pads with the replication of the last value on the edge of the array along each axis. - `"symmetric"`: pads with the reflection of the vector mirrored along the edge of the array. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
pad_image
python
huggingface/transformers
src/transformers/models/zoedepth/image_processing_zoedepth.py
https://github.com/huggingface/transformers/blob/master/src/transformers/models/zoedepth/image_processing_zoedepth.py
Apache-2.0
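The padding amounts grow with the square root of the image size rather than linearly. As a worked example (not part of the original file), a 480x640 NYU-sized frame gets 46 rows of reflection padding on the top/bottom and 53 columns on the left/right:

```python
# Worked example of the reflection-padding amounts computed above.
import numpy as np

height, width = 480, 640
pad_height = int(np.sqrt(height / 2) * 3)  # int(sqrt(240) * 3) = int(46.47) = 46
pad_width = int(np.sqrt(width / 2) * 3)    # int(sqrt(320) * 3) = int(53.66) = 53
print(pad_height, pad_width)               # 46 53
```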
def preprocess( self, images: ImageInput, do_pad: Optional[bool] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_resize: Optional[bool] = None, size: Optional[int] = None, keep_aspect_ratio: Optional[bool] = None, ensure_multiple_of: Optional[int] = None, resample: PILImageResampling = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> PIL.Image.Image: """ Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. do_pad (`bool`, *optional*, defaults to `self.do_pad`): Whether to pad the input image. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image values between [0 - 1]. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`): Image mean. image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`): Image standard deviation. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`Dict[str, int]`, *optional*, defaults to `self.size`): Size of the image after resizing. If `keep_aspect_ratio` is `True`, he image is resized by choosing the smaller of the height and width scaling factors and using it for both dimensions. If `ensure_multiple_of` is also set, the image is further resized to a size that is a multiple of this value. keep_aspect_ratio (`bool`, *optional*, defaults to `self.keep_aspect_ratio`): If `True` and `do_resize=True`, the image is resized by choosing the smaller of the height and width scaling factors and using it for both dimensions. This ensures that the image is scaled down as little as possible while still fitting within the desired output size. In case `ensure_multiple_of` is also set, the image is further resized to a size that is a multiple of this value by flooring the height and width to the nearest multiple of this value. ensure_multiple_of (`int`, *optional*, defaults to `self.ensure_multiple_of`): If `do_resize` is `True`, the image is resized to a size that is a multiple of this value. Works by flooring the height and width to the nearest multiple of this value. Works both with and without `keep_aspect_ratio` being set to `True`. Can be overridden by `ensure_multiple_of` in `preprocess`. resample (`int`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`, Only has an effect if `do_resize` is set to `True`. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. 
- `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `ChannelDimension.LAST`: image in (height, width, num_channels) format. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ do_resize = do_resize if do_resize is not None else self.do_resize size = size if size is not None else self.size size = get_size_dict(size) keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of resample = resample if resample is not None else self.resample do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std do_pad = do_pad if do_pad is not None else self.do_pad images = make_list_of_images(images) if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) validate_preprocess_arguments( do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_resize=do_resize, size=size, resample=resample, ) # All transformations expect numpy arrays. images = [to_numpy_array(image) for image in images] if do_rescale and is_scaled_image(images[0]): logger.warning_once( "It looks like you are trying to rescale already rescaled images. If the input" " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." ) if input_data_format is None: # We assume that all images have the same channel dimension format. input_data_format = infer_channel_dimension_format(images[0]) if do_rescale: images = [ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) for image in images ] if do_pad: images = [self.pad_image(image=image, input_data_format=input_data_format) for image in images] if do_resize: images = [ self.resize( image=image, size=size, resample=resample, keep_aspect_ratio=keep_aspect_ratio, ensure_multiple_of=ensure_multiple_of, input_data_format=input_data_format, ) for image in images ] if do_normalize: images = [ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) for image in images ] images = [ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images ] data = {"pixel_values": images} return BatchFeature(data=data, tensor_type=return_tensors)
Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. do_pad (`bool`, *optional*, defaults to `self.do_pad`): Whether to pad the input image. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image values between [0 - 1]. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`): Image mean. image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`): Image standard deviation. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`Dict[str, int]`, *optional*, defaults to `self.size`): Size of the image after resizing. If `keep_aspect_ratio` is `True`, he image is resized by choosing the smaller of the height and width scaling factors and using it for both dimensions. If `ensure_multiple_of` is also set, the image is further resized to a size that is a multiple of this value. keep_aspect_ratio (`bool`, *optional*, defaults to `self.keep_aspect_ratio`): If `True` and `do_resize=True`, the image is resized by choosing the smaller of the height and width scaling factors and using it for both dimensions. This ensures that the image is scaled down as little as possible while still fitting within the desired output size. In case `ensure_multiple_of` is also set, the image is further resized to a size that is a multiple of this value by flooring the height and width to the nearest multiple of this value. ensure_multiple_of (`int`, *optional*, defaults to `self.ensure_multiple_of`): If `do_resize` is `True`, the image is resized to a size that is a multiple of this value. Works by flooring the height and width to the nearest multiple of this value. Works both with and without `keep_aspect_ratio` being set to `True`. Can be overridden by `ensure_multiple_of` in `preprocess`. resample (`int`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`, Only has an effect if `do_resize` is set to `True`. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `ChannelDimension.LAST`: image in (height, width, num_channels) format. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. 
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
preprocess
python
huggingface/transformers
src/transformers/models/zoedepth/image_processing_zoedepth.py
https://github.com/huggingface/transformers/blob/master/src/transformers/models/zoedepth/image_processing_zoedepth.py
Apache-2.0
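In practice the pipeline above (rescale -> pad -> resize -> normalize) is driven through the processor's call interface rather than `preprocess` directly. A usage sketch, reusing the `Intel/zoedepth-nyu-kitti` checkpoint that appears in the depth-estimation example later in this file:

```python
# Usage sketch for the ZoeDepth preprocessing pipeline described above.
import requests
from PIL import Image
from transformers import AutoImageProcessor

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

image_processor = AutoImageProcessor.from_pretrained("Intel/zoedepth-nyu-kitti")
inputs = image_processor(images=image, return_tensors="pt")  # rescale -> pad -> resize -> normalize
print(inputs["pixel_values"].shape)  # (1, 3, height, width) after padding and resizing
```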
def resize( self, images: "torch.Tensor", size: SizeDict, keep_aspect_ratio: bool = False, ensure_multiple_of: int = 1, interpolation: Optional["F.InterpolationMode"] = None, ) -> "torch.Tensor": """ Resize an image or batchd images to target size `(size["height"], size["width"])`. If `keep_aspect_ratio` is `True`, the image is resized to the largest possible size such that the aspect ratio is preserved. If `ensure_multiple_of` is set, the image is resized to a size that is a multiple of this value. Args: images (`torch.Tensor`): Images to resize. size (`Dict[str, int]`): Target size of the output image. keep_aspect_ratio (`bool`, *optional*, defaults to `False`): If `True`, the image is resized to the largest possible size such that the aspect ratio is preserved. ensure_multiple_of (`int`, *optional*, defaults to 1): The image is resized to a size that is a multiple of this value. interpolation (`F.InterpolationMode`, *optional*, defaults to `InterpolationMode.BILINEAR`): Defines the resampling filter to use if resizing the image. Otherwise, the image is resized to size specified in `size`. """ if not size.height or not size.width: raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size}") output_size = get_resize_output_image_size( images, output_size=(size.height, size.width), keep_aspect_ratio=keep_aspect_ratio, multiple=ensure_multiple_of, input_data_format=ChannelDimension.FIRST, ) height, width = output_size resized_images = torch.nn.functional.interpolate( images, (int(height), int(width)), mode=interpolation.value, align_corners=True ) return resized_images
Resize an image or a batch of images to target size `(size["height"], size["width"])`. If `keep_aspect_ratio` is `True`, the image is resized to the largest possible size such that the aspect ratio is preserved. If `ensure_multiple_of` is set, the image is resized to a size that is a multiple of this value. Args: images (`torch.Tensor`): Images to resize. size (`Dict[str, int]`): Target size of the output image. keep_aspect_ratio (`bool`, *optional*, defaults to `False`): If `True`, the image is resized to the largest possible size such that the aspect ratio is preserved. ensure_multiple_of (`int`, *optional*, defaults to 1): The image is resized to a size that is a multiple of this value. interpolation (`F.InterpolationMode`, *optional*, defaults to `InterpolationMode.BILINEAR`): Interpolation mode to use when resizing the images.
resize
python
huggingface/transformers
src/transformers/models/zoedepth/image_processing_zoedepth_fast.py
https://github.com/huggingface/transformers/blob/master/src/transformers/models/zoedepth/image_processing_zoedepth_fast.py
Apache-2.0
def _pad_images( self, images: "torch.Tensor", ): """ Args: image (`torch.Tensor`): Image to pad. """ height, width = get_image_size(images, channel_dim=ChannelDimension.FIRST) pad_height = int(np.sqrt(height / 2) * 3) pad_width = int(np.sqrt(width / 2) * 3) return F.pad(images, padding=(pad_width, pad_height), padding_mode="reflect")
Pad the images with reflection padding, following the same scheme as `pad_image` in the slow image processor. Args: images (`torch.Tensor`): Images to pad.
_pad_images
python
huggingface/transformers
src/transformers/models/zoedepth/image_processing_zoedepth_fast.py
https://github.com/huggingface/transformers/blob/master/src/transformers/models/zoedepth/image_processing_zoedepth_fast.py
Apache-2.0
def forward(self, hidden_states: List[torch.Tensor], patch_height, patch_width) -> List[torch.Tensor]: """ Args: hidden_states (`List[torch.FloatTensor]`, each of shape `(batch_size, sequence_length + 1, hidden_size)`): List of hidden states from the backbone. """ batch_size = hidden_states[0].shape[0] # stack along batch dimension # shape (batch_size*num_stages, sequence_length + 1, hidden_size) hidden_states = torch.cat(hidden_states, dim=0) cls_token, hidden_states = hidden_states[:, 0], hidden_states[:, 1:] # reshape hidden_states to (batch_size*num_stages, num_channels, height, width) total_batch_size, sequence_length, num_channels = hidden_states.shape hidden_states = hidden_states.reshape(total_batch_size, patch_height, patch_width, num_channels) hidden_states = hidden_states.permute(0, 3, 1, 2).contiguous() if self.readout_type == "project": # reshape to (batch_size*num_stages, height*width, num_channels) hidden_states = hidden_states.flatten(2).permute((0, 2, 1)) readout = cls_token.unsqueeze(dim=1).expand_as(hidden_states) # concatenate the readout token to the hidden states # to get (batch_size*num_stages, height*width, 2*num_channels) hidden_states = torch.cat((hidden_states, readout), -1) elif self.readout_type == "add": hidden_states = hidden_states + cls_token.unsqueeze(-1) out = [] for stage_idx, hidden_state in enumerate(hidden_states.split(batch_size, dim=0)): if self.readout_type == "project": hidden_state = self.readout_projects[stage_idx](hidden_state) # reshape back to (batch_size, num_channels, height, width) hidden_state = hidden_state.permute(0, 2, 1).reshape(batch_size, -1, patch_height, patch_width) hidden_state = self.layers[stage_idx](hidden_state) out.append(hidden_state) return out
Args: hidden_states (`List[torch.FloatTensor]`, each of shape `(batch_size, sequence_length + 1, hidden_size)`): List of hidden states from the backbone.
forward
python
huggingface/transformers
src/transformers/models/zoedepth/modeling_zoedepth.py
https://github.com/huggingface/transformers/blob/master/src/transformers/models/zoedepth/modeling_zoedepth.py
Apache-2.0
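For the `"project"` readout, the CLS token is broadcast to every patch position, concatenated with the patch features, and projected back down to `num_channels`. A minimal sketch, where the `project` module is a stand-in for `self.readout_projects[stage_idx]` and the shapes are illustrative:

```python
# Sketch of the "project" readout described above.
import torch
import torch.nn as nn

batch_size, num_patches, num_channels = 2, 144, 768
hidden_state = torch.randn(batch_size, num_patches, num_channels)
cls_token = torch.randn(batch_size, num_channels)

readout = cls_token.unsqueeze(1).expand_as(hidden_state)   # broadcast CLS to every patch position
fused = torch.cat((hidden_state, readout), dim=-1)         # (batch, patches, 2 * channels)

project = nn.Sequential(nn.Linear(2 * num_channels, num_channels), nn.GELU())  # stand-in projection
print(project(fused).shape)  # torch.Size([2, 144, 768])
```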
def forward(self, hidden_states: List[torch.Tensor], patch_height, patch_width) -> List[torch.Tensor]: """ Args: hidden_states (`List[torch.FloatTensor]`, each of shape `(batch_size, sequence_length, hidden_size)` or `(batch_size, hidden_size, height, width)`): List of hidden states from the backbone. """ if not isinstance(hidden_states, (tuple, list)): raise TypeError("hidden_states should be a tuple or list of tensors") if len(hidden_states) != len(self.config.neck_hidden_sizes): raise ValueError("The number of hidden states should be equal to the number of neck hidden sizes.") # postprocess hidden states if self.reassemble_stage is not None: hidden_states = self.reassemble_stage(hidden_states, patch_height, patch_width) features = [self.convs[i](feature) for i, feature in enumerate(hidden_states)] # fusion blocks output = self.fusion_stage(features) return output, features[-1]
Args: hidden_states (`List[torch.FloatTensor]`, each of shape `(batch_size, sequence_length, hidden_size)` or `(batch_size, hidden_size, height, width)`): List of hidden states from the backbone.
forward
python
huggingface/transformers
src/transformers/models/zoedepth/modeling_zoedepth.py
https://github.com/huggingface/transformers/blob/master/src/transformers/models/zoedepth/modeling_zoedepth.py
Apache-2.0
def __init__(self, n_classes=256, act=torch.softmax): """Compute log binomial distribution for n_classes Args: n_classes (`int`, *optional*, defaults to 256): Number of output classes. act (`torch.nn.Module`, *optional*, defaults to `torch.softmax`): Activation function to apply to the output. """ super().__init__() self.k = n_classes self.act = act self.register_buffer("k_idx", torch.arange(0, n_classes).view(1, -1, 1, 1), persistent=False) self.register_buffer("k_minus_1", torch.tensor([self.k - 1]).view(1, -1, 1, 1), persistent=False)
Compute log binomial distribution for n_classes Args: n_classes (`int`, *optional*, defaults to 256): Number of output classes. act (`torch.nn.Module`, *optional*, defaults to `torch.softmax`): Activation function to apply to the output.
__init__
python
huggingface/transformers
src/transformers/models/zoedepth/modeling_zoedepth.py
https://github.com/huggingface/transformers/blob/master/src/transformers/models/zoedepth/modeling_zoedepth.py
Apache-2.0
def forward(self, probabilities, temperature=1.0, eps=1e-4): """Compute the log binomial distribution for probabilities. Args: probabilities (`torch.Tensor` of shape `(batch_size, num_channels, height, width)`): Tensor containing probabilities of each class. temperature (`float` or `torch.Tensor` of shape `(batch_size, num_channels, height, width)`, *optional*, defaults to 1): Temperature of distribution. eps (`float`, *optional*, defaults to 1e-4): Small number for numerical stability. Returns: `torch.Tensor` of shape `(batch_size, num_channels, height, width)`: Log binomial distribution logbinomial(p;t). """ if probabilities.ndim == 3: probabilities = probabilities.unsqueeze(1) # make it (batch_size, num_channels, height, width) one_minus_probabilities = torch.clamp(1 - probabilities, eps, 1) probabilities = torch.clamp(probabilities, eps, 1) y = ( log_binom(self.k_minus_1, self.k_idx) + self.k_idx * torch.log(probabilities) + (self.k_minus_1 - self.k_idx) * torch.log(one_minus_probabilities) ) return self.act(y / temperature, dim=1)
Compute the log binomial distribution for probabilities. Args: probabilities (`torch.Tensor` of shape `(batch_size, num_channels, height, width)`): Tensor containing probabilities of each class. temperature (`float` or `torch.Tensor` of shape `(batch_size, num_channels, height, width)`, *optional*, defaults to 1): Temperature of distribution. eps (`float`, *optional*, defaults to 1e-4): Small number for numerical stability. Returns: `torch.Tensor` of shape `(batch_size, num_channels, height, width)`: Log binomial distribution logbinomial(p;t).
forward
python
huggingface/transformers
src/transformers/models/zoedepth/modeling_zoedepth.py
https://github.com/huggingface/transformers/blob/master/src/transformers/models/zoedepth/modeling_zoedepth.py
Apache-2.0
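Written out, the forward pass above scores every bin index with the log-probability mass of a binomial distribution and then applies a temperature-scaled softmax over the bin dimension. In the notation of the code (k = `n_classes`, probability p, temperature t):

```latex
% Log-binomial scores computed by the forward pass above
y_i = \log\binom{k-1}{i} + i\,\log p + (k-1-i)\,\log(1-p), \qquad i = 0,\dots,k-1,
\qquad \mathrm{output} = \mathrm{softmax}\!\left(\tfrac{y}{t}\right)
```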
def __init__( self, config, in_features, condition_dim, n_classes=256, bottleneck_factor=2, ): """Per-pixel MLP followed by a Conditional Log Binomial softmax. Args: in_features (`int`): Number of input channels in the main feature. condition_dim (`int`): Number of input channels in the condition feature. n_classes (`int`, *optional*, defaults to 256): Number of classes. bottleneck_factor (`int`, *optional*, defaults to 2): Hidden dim factor. """ super().__init__() bottleneck = (in_features + condition_dim) // bottleneck_factor self.mlp = nn.Sequential( nn.Conv2d(in_features + condition_dim, bottleneck, kernel_size=1, stride=1, padding=0), nn.GELU(), # 2 for probabilities linear norm, 2 for temperature linear norm nn.Conv2d(bottleneck, 2 + 2, kernel_size=1, stride=1, padding=0), nn.Softplus(), ) self.p_eps = 1e-4 self.max_temp = config.max_temp self.min_temp = config.min_temp self.log_binomial_transform = LogBinomialSoftmax(n_classes, act=torch.softmax)
Per-pixel MLP followed by a Conditional Log Binomial softmax. Args: in_features (`int`): Number of input channels in the main feature. condition_dim (`int`): Number of input channels in the condition feature. n_classes (`int`, *optional*, defaults to 256): Number of classes. bottleneck_factor (`int`, *optional*, defaults to 2): Hidden dim factor.
__init__
python
huggingface/transformers
src/transformers/models/zoedepth/modeling_zoedepth.py
https://github.com/huggingface/transformers/blob/master/src/transformers/models/zoedepth/modeling_zoedepth.py
Apache-2.0
def forward(self, main_feature, condition_feature): """ Args: main_feature (`torch.Tensor` of shape `(batch_size, num_channels, height, width)`): Main feature. condition_feature (torch.Tensor of shape `(batch_size, num_channels, height, width)`): Condition feature. Returns: `torch.Tensor`: Output log binomial distribution """ probabilities_and_temperature = self.mlp(torch.concat((main_feature, condition_feature), dim=1)) probabilities, temperature = ( probabilities_and_temperature[:, :2, ...], probabilities_and_temperature[:, 2:, ...], ) probabilities = probabilities + self.p_eps probabilities = probabilities[:, 0, ...] / (probabilities[:, 0, ...] + probabilities[:, 1, ...]) temperature = temperature + self.p_eps temperature = temperature[:, 0, ...] / (temperature[:, 0, ...] + temperature[:, 1, ...]) temperature = temperature.unsqueeze(1) temperature = (self.max_temp - self.min_temp) * temperature + self.min_temp return self.log_binomial_transform(probabilities, temperature)
Args: main_feature (`torch.Tensor` of shape `(batch_size, num_channels, height, width)`): Main feature. condition_feature (torch.Tensor of shape `(batch_size, num_channels, height, width)`): Condition feature. Returns: `torch.Tensor`: Output log binomial distribution
forward
python
huggingface/transformers
src/transformers/models/zoedepth/modeling_zoedepth.py
https://github.com/huggingface/transformers/blob/master/src/transformers/models/zoedepth/modeling_zoedepth.py
Apache-2.0
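The four channels produced by the MLP are split into two pairs: each pair is normalized into a single value in (0, 1), giving a per-pixel probability and a normalized temperature, and the temperature is then mapped into the configured range. With ε = `p_eps`:

```latex
% Probability and temperature construction in the forward pass above
p = \frac{p_0 + \epsilon}{(p_0 + \epsilon) + (p_1 + \epsilon)}, \qquad
\hat{t} = \frac{t_0 + \epsilon}{(t_0 + \epsilon) + (t_1 + \epsilon)}, \qquad
t = (t_{\max} - t_{\min})\,\hat{t} + t_{\min}
```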
def __init__(self, config, n_bins=16, mlp_dim=256, min_depth=1e-3, max_depth=10): """Bin center regressor network. Can be "normed" or "unnormed". If "normed", bin centers are bounded on the (min_depth, max_depth) interval. Args: config (`int`): Model configuration. n_bins (`int`, *optional*, defaults to 16): Number of bin centers. mlp_dim (`int`, *optional*, defaults to 256): Hidden dimension. min_depth (`float`, *optional*, defaults to 1e-3): Min depth value. max_depth (`float`, *optional*, defaults to 10): Max depth value. """ super().__init__() self.in_features = config.bottleneck_features self.bin_centers_type = config.bin_centers_type self.min_depth = min_depth self.max_depth = max_depth self.conv1 = nn.Conv2d(self.in_features, mlp_dim, 1, 1, 0) self.act1 = nn.ReLU(inplace=True) self.conv2 = nn.Conv2d(mlp_dim, n_bins, 1, 1, 0) self.act2 = nn.ReLU(inplace=True) if self.bin_centers_type == "normed" else nn.Softplus()
Bin center regressor network. Can be "normed" or "unnormed". If "normed", bin centers are bounded on the (min_depth, max_depth) interval. Args: config (`ZoeDepthConfig`): Model configuration. n_bins (`int`, *optional*, defaults to 16): Number of bin centers. mlp_dim (`int`, *optional*, defaults to 256): Hidden dimension. min_depth (`float`, *optional*, defaults to 1e-3): Min depth value. max_depth (`float`, *optional*, defaults to 10): Max depth value.
__init__
python
huggingface/transformers
src/transformers/models/zoedepth/modeling_zoedepth.py
https://github.com/huggingface/transformers/blob/master/src/transformers/models/zoedepth/modeling_zoedepth.py
Apache-2.0
def forward(self, x): """ Returns tensor of bin_width vectors (centers). One vector b for every pixel """ x = self.conv1(x) x = self.act1(x) x = self.conv2(x) bin_centers = self.act2(x) if self.bin_centers_type == "normed": bin_centers = bin_centers + 1e-3 bin_widths_normed = bin_centers / bin_centers.sum(dim=1, keepdim=True) # shape (batch_size, num_channels, height, width) bin_widths = (self.max_depth - self.min_depth) * bin_widths_normed # pad has the form (left, right, top, bottom, front, back) bin_widths = nn.functional.pad(bin_widths, (0, 0, 0, 0, 1, 0), mode="constant", value=self.min_depth) # shape (batch_size, num_channels, height, width) bin_edges = torch.cumsum(bin_widths, dim=1) bin_centers = 0.5 * (bin_edges[:, :-1, ...] + bin_edges[:, 1:, ...]) return bin_widths_normed, bin_centers else: return bin_centers, bin_centers
Returns tensor of bin_width vectors (centers). One vector b for every pixel
forward
python
huggingface/transformers
src/transformers/models/zoedepth/modeling_zoedepth.py
https://github.com/huggingface/transformers/blob/master/src/transformers/models/zoedepth/modeling_zoedepth.py
Apache-2.0
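In the "normed" case the per-pixel bin widths are normalized to sum to one, scaled to the depth range, turned into bin edges with a cumulative sum (after prepending `min_depth`), and the centers are the midpoints of consecutive edges. A small numeric sketch for a single pixel; the bin count and widths are illustrative:

```python
# Numeric sketch of the "normed" bin-center computation above
# (single pixel, 4 bins, depth range [1e-3, 10]).
import torch

min_depth, max_depth = 1e-3, 10.0
widths_normed = torch.tensor([0.1, 0.2, 0.3, 0.4])                           # sums to 1
widths = (max_depth - min_depth) * widths_normed                             # metric bin widths
edges = torch.cumsum(torch.cat([torch.tensor([min_depth]), widths]), dim=0)  # bin edges
centers = 0.5 * (edges[:-1] + edges[1:])                                     # midpoints of consecutive edges
print(centers)  # approximately tensor([0.5010, 2.0008, 4.5006, 8.0002])
```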
def __init__( self, config, n_bins, n_attractors=16, min_depth=1e-3, max_depth=10, memory_efficient=False, ): """ Attractor layer for bin centers. Bin centers are bounded on the interval (min_depth, max_depth) """ super().__init__() self.alpha = config.attractor_alpha self.gemma = config.attractor_gamma self.kind = config.attractor_kind self.n_attractors = n_attractors self.n_bins = n_bins self.min_depth = min_depth self.max_depth = max_depth self.memory_efficient = memory_efficient # MLP to predict attractor points in_features = mlp_dim = config.bin_embedding_dim self.conv1 = nn.Conv2d(in_features, mlp_dim, 1, 1, 0) self.act1 = nn.ReLU(inplace=True) self.conv2 = nn.Conv2d(mlp_dim, n_attractors * 2, 1, 1, 0) # x2 for linear norm self.act2 = nn.ReLU(inplace=True)
Attractor layer for bin centers. Bin centers are bounded on the interval (min_depth, max_depth)
__init__
python
huggingface/transformers
src/transformers/models/zoedepth/modeling_zoedepth.py
https://github.com/huggingface/transformers/blob/master/src/transformers/models/zoedepth/modeling_zoedepth.py
Apache-2.0
def forward(self, x, prev_bin, prev_bin_embedding=None, interpolate=True): """ The forward pass of the attractor layer. This layer predicts the new bin centers based on the previous bin centers and the attractor points (the latter are predicted by the MLP). Args: x (`torch.Tensor` of shape `(batch_size, num_channels, height, width)`): Feature block. prev_bin (`torch.Tensor` of shape `(batch_size, prev_number_of_bins, height, width)`): Previous bin centers normed. prev_bin_embedding (`torch.Tensor`, *optional*): Optional previous bin embeddings. interpolate (`bool`, *optional*, defaults to `True`): Whether to interpolate the previous bin embeddings to the size of the input features. Returns: `Tuple[`torch.Tensor`, `torch.Tensor`]: New bin centers normed and scaled. """ if prev_bin_embedding is not None: if interpolate: prev_bin_embedding = nn.functional.interpolate( prev_bin_embedding, x.shape[-2:], mode="bilinear", align_corners=True ) x = x + prev_bin_embedding x = self.conv1(x) x = self.act1(x) x = self.conv2(x) attractors = self.act2(x) attractors = attractors + 1e-3 batch_size, _, height, width = attractors.shape attractors = attractors.view(batch_size, self.n_attractors, 2, height, width) # batch_size, num_attractors, 2, height, width # note: original repo had a bug here: https://github.com/isl-org/ZoeDepth/blame/edb6daf45458569e24f50250ef1ed08c015f17a7/zoedepth/models/layers/attractor.py#L105C9-L106C50 # we include the bug to maintain compatibility with the weights attractors_normed = attractors[:, :, 0, ...] # batch_size, batch_size*num_attractors, height, width bin_centers = nn.functional.interpolate(prev_bin, (height, width), mode="bilinear", align_corners=True) # note: only attractor_type = "exp" is supported here, since no checkpoints were released with other attractor types if not self.memory_efficient: func = {"mean": torch.mean, "sum": torch.sum}[self.kind] # shape (batch_size, num_bins, height, width) delta_c = func(inv_attractor(attractors_normed.unsqueeze(2) - bin_centers.unsqueeze(1)), dim=1) else: delta_c = torch.zeros_like(bin_centers, device=bin_centers.device) for i in range(self.n_attractors): # shape (batch_size, num_bins, height, width) delta_c += inv_attractor(attractors_normed[:, i, ...].unsqueeze(1) - bin_centers) if self.kind == "mean": delta_c = delta_c / self.n_attractors bin_new_centers = bin_centers + delta_c bin_centers = (self.max_depth - self.min_depth) * bin_new_centers + self.min_depth bin_centers, _ = torch.sort(bin_centers, dim=1) bin_centers = torch.clip(bin_centers, self.min_depth, self.max_depth) return bin_new_centers, bin_centers
The forward pass of the attractor layer. This layer predicts the new bin centers based on the previous bin centers and the attractor points (the latter are predicted by the MLP). Args: x (`torch.Tensor` of shape `(batch_size, num_channels, height, width)`): Feature block. prev_bin (`torch.Tensor` of shape `(batch_size, prev_number_of_bins, height, width)`): Previous bin centers normed. prev_bin_embedding (`torch.Tensor`, *optional*): Optional previous bin embeddings. interpolate (`bool`, *optional*, defaults to `True`): Whether to interpolate the previous bin embeddings to the size of the input features. Returns: `Tuple[torch.Tensor, torch.Tensor]`: New bin centers normed and scaled.
forward
python
huggingface/transformers
src/transformers/models/zoedepth/modeling_zoedepth.py
https://github.com/huggingface/transformers/blob/master/src/transformers/models/zoedepth/modeling_zoedepth.py
Apache-2.0
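Each attractor point pulls every bin center towards it; the per-bin update is the mean or sum (depending on `attractor_kind`) of the individual attraction terms, after which the centers are rescaled to the metric depth range, sorted and clipped. Assuming the inverse-attractor form from the ZoeDepth paper for `inv_attractor` (its α and γ values come from the configuration and are not restated here):

```latex
% Attractor update applied in the forward pass above (normed variant);
% a_m are the M attractor points, c_j the previous bin centers, Z = M for "mean" and 1 for "sum"
\Delta c_j = \frac{1}{Z}\sum_{m=1}^{M} \frac{a_m - c_j}{1 + \alpha\,\lvert a_m - c_j\rvert^{\gamma}}, \qquad
c_j' = c_j + \Delta c_j, \qquad
\tilde{c} = \mathrm{clip}\!\big(\mathrm{sort}\big((d_{\max} - d_{\min})\,c' + d_{\min}\big),\, d_{\min},\, d_{\max}\big)
```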
def __init__( self, config, n_bins, n_attractors=16, min_depth=1e-3, max_depth=10, memory_efficient=True, ): """ Attractor layer for bin centers. Bin centers are unbounded """ super().__init__() self.n_attractors = n_attractors self.n_bins = n_bins self.min_depth = min_depth self.max_depth = max_depth self.alpha = config.attractor_alpha self.gamma = config.attractor_alpha self.kind = config.attractor_kind self.memory_efficient = memory_efficient in_features = mlp_dim = config.bin_embedding_dim self.conv1 = nn.Conv2d(in_features, mlp_dim, 1, 1, 0) self.act1 = nn.ReLU(inplace=True) self.conv2 = nn.Conv2d(mlp_dim, n_attractors, 1, 1, 0) self.act2 = nn.Softplus()
Attractor layer for bin centers. Bin centers are unbounded
__init__
python
huggingface/transformers
src/transformers/models/zoedepth/modeling_zoedepth.py
https://github.com/huggingface/transformers/blob/master/src/transformers/models/zoedepth/modeling_zoedepth.py
Apache-2.0
def forward(self, x, prev_bin, prev_bin_embedding=None, interpolate=True): """ The forward pass of the attractor layer. This layer predicts the new bin centers based on the previous bin centers and the attractor points (the latter are predicted by the MLP). Args: x (`torch.Tensor` of shape (batch_size, num_channels, height, width)`): Feature block. prev_bin (`torch.Tensor` of shape (batch_size, prev_num_bins, height, width)`): Previous bin centers normed. prev_bin_embedding (`torch.Tensor`, *optional*): Optional previous bin embeddings. interpolate (`bool`, *optional*, defaults to `True`): Whether to interpolate the previous bin embeddings to the size of the input features. Returns: `Tuple[`torch.Tensor`, `torch.Tensor`]: New bin centers unbounded. Two outputs just to keep the API consistent with the normed version. """ if prev_bin_embedding is not None: if interpolate: prev_bin_embedding = nn.functional.interpolate( prev_bin_embedding, x.shape[-2:], mode="bilinear", align_corners=True ) x = x + prev_bin_embedding x = self.conv1(x) x = self.act1(x) x = self.conv2(x) attractors = self.act2(x) height, width = attractors.shape[-2:] bin_centers = nn.functional.interpolate(prev_bin, (height, width), mode="bilinear", align_corners=True) if not self.memory_efficient: func = {"mean": torch.mean, "sum": torch.sum}[self.kind] # shape batch_size, num_bins, height, width delta_c = func(inv_attractor(attractors.unsqueeze(2) - bin_centers.unsqueeze(1)), dim=1) else: delta_c = torch.zeros_like(bin_centers, device=bin_centers.device) for i in range(self.n_attractors): # shape batch_size, num_bins, height, width delta_c += inv_attractor(attractors[:, i, ...].unsqueeze(1) - bin_centers) if self.kind == "mean": delta_c = delta_c / self.n_attractors bin_new_centers = bin_centers + delta_c bin_centers = bin_new_centers return bin_new_centers, bin_centers
The forward pass of the attractor layer. This layer predicts the new bin centers based on the previous bin centers and the attractor points (the latter are predicted by the MLP). Args: x (`torch.Tensor` of shape `(batch_size, num_channels, height, width)`): Feature block. prev_bin (`torch.Tensor` of shape `(batch_size, prev_num_bins, height, width)`): Previous bin centers normed. prev_bin_embedding (`torch.Tensor`, *optional*): Optional previous bin embeddings. interpolate (`bool`, *optional*, defaults to `True`): Whether to interpolate the previous bin embeddings to the size of the input features. Returns: `Tuple[torch.Tensor, torch.Tensor]`: New bin centers unbounded. Two outputs just to keep the API consistent with the normed version.
forward
python
huggingface/transformers
src/transformers/models/zoedepth/modeling_zoedepth.py
https://github.com/huggingface/transformers/blob/master/src/transformers/models/zoedepth/modeling_zoedepth.py
Apache-2.0
def __init__(self, in_features, out_features, mlp_dim=128): """Projector MLP. Args: in_features (`int`): Number of input channels. out_features (`int`): Number of output channels. mlp_dim (`int`, *optional*, defaults to 128): Hidden dimension. """ super().__init__() self.conv1 = nn.Conv2d(in_features, mlp_dim, 1, 1, 0) self.act = nn.ReLU(inplace=True) self.conv2 = nn.Conv2d(mlp_dim, out_features, 1, 1, 0)
Projector MLP. Args: in_features (`int`): Number of input channels. out_features (`int`): Number of output channels. mlp_dim (`int`, *optional*, defaults to 128): Hidden dimension.
__init__
python
huggingface/transformers
src/transformers/models/zoedepth/modeling_zoedepth.py
https://github.com/huggingface/transformers/blob/master/src/transformers/models/zoedepth/modeling_zoedepth.py
Apache-2.0
def __init__(self, config): """ViT-like transformer block Args: config (`ZoeDepthConfig`): Model configuration class defining the model architecture. """ super().__init__() in_channels = config.bottleneck_features self.transformer_encoder = nn.ModuleList( [ZoeDepthTransformerEncoderLayer(config) for _ in range(config.num_patch_transformer_layers)] ) self.embedding_convPxP = nn.Conv2d( in_channels, config.patch_transformer_hidden_size, kernel_size=1, stride=1, padding=0 )
ViT-like transformer block Args: config (`ZoeDepthConfig`): Model configuration class defining the model architecture.
__init__
python
huggingface/transformers
src/transformers/models/zoedepth/modeling_zoedepth.py
https://github.com/huggingface/transformers/blob/master/src/transformers/models/zoedepth/modeling_zoedepth.py
Apache-2.0
def positional_encoding_1d(self, batch_size, sequence_length, embedding_dim, device="cpu", dtype=torch.float32): """Generate positional encodings Args: sequence_length (int): Sequence length embedding_dim (int): Embedding dimension Returns: torch.Tensor: Positional encodings. """ position = torch.arange(0, sequence_length, dtype=dtype, device=device).unsqueeze(1) index = torch.arange(0, embedding_dim, 2, dtype=dtype, device=device).unsqueeze(0) div_term = torch.exp(index * (-torch.log(torch.tensor(10000.0, device=device)) / embedding_dim)) pos_encoding = position * div_term pos_encoding = torch.cat([torch.sin(pos_encoding), torch.cos(pos_encoding)], dim=1) pos_encoding = pos_encoding.unsqueeze(dim=0).repeat(batch_size, 1, 1) return pos_encoding
Generate sinusoidal positional encodings. Args: batch_size (int): Batch size. sequence_length (int): Sequence length. embedding_dim (int): Embedding dimension. Returns: torch.Tensor: Positional encodings of shape (batch_size, sequence_length, embedding_dim).
positional_encoding_1d
python
huggingface/transformers
src/transformers/models/zoedepth/modeling_zoedepth.py
https://github.com/huggingface/transformers/blob/master/src/transformers/models/zoedepth/modeling_zoedepth.py
Apache-2.0
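The helper builds a fixed sinusoidal table; unlike the interleaved formulation in the original Transformer paper, the sine and cosine halves are concatenated along the feature dimension (first half sine, second half cosine). With d = `embedding_dim`:

```latex
% Encodings produced by positional_encoding_1d above
\omega_i = \exp\!\Big(-\tfrac{2i\,\log 10000}{d}\Big) = 10000^{-2i/d}, \qquad i = 0,\dots,\tfrac{d}{2}-1,
\qquad
\mathrm{PE}(\mathrm{pos}) = \big[\sin(\mathrm{pos}\,\omega_0),\dots,\sin(\mathrm{pos}\,\omega_{d/2-1}),\;
\cos(\mathrm{pos}\,\omega_0),\dots,\cos(\mathrm{pos}\,\omega_{d/2-1})\big]
```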
def forward(self, x): """Forward pass Args: x (torch.Tensor - NCHW): Input feature tensor Returns: torch.Tensor - Transformer output embeddings of shape (batch_size, sequence_length, embedding_dim) """ embeddings = self.embedding_convPxP(x).flatten(2) # shape (batch_size, num_channels, sequence_length) # add an extra special CLS token at the start for global accumulation embeddings = nn.functional.pad(embeddings, (1, 0)) embeddings = embeddings.permute(0, 2, 1) batch_size, sequence_length, embedding_dim = embeddings.shape embeddings = embeddings + self.positional_encoding_1d( batch_size, sequence_length, embedding_dim, device=embeddings.device, dtype=embeddings.dtype ) for i in range(4): embeddings = self.transformer_encoder[i](embeddings) return embeddings
Forward pass Args: x (torch.Tensor - NCHW): Input feature tensor Returns: torch.Tensor - Transformer output embeddings of shape (batch_size, sequence_length, embedding_dim)
forward
python
huggingface/transformers
src/transformers/models/zoedepth/modeling_zoedepth.py
https://github.com/huggingface/transformers/blob/master/src/transformers/models/zoedepth/modeling_zoedepth.py
Apache-2.0
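The `nn.functional.pad(embeddings, (1, 0))` call prepends a single all-zero position along the sequence dimension, which serves as the CLS slot that accumulates global context through the transformer layers. A minimal sketch with illustrative shapes:

```python
# Sketch of the CLS-slot padding used in the forward pass above.
import torch
import torch.nn as nn

embeddings = torch.randn(2, 128, 100)           # (batch_size, num_channels, sequence_length)
padded = nn.functional.pad(embeddings, (1, 0))  # one extra position at the start of the sequence
print(padded.shape)                             # torch.Size([2, 128, 101])
print(padded[..., 0].abs().sum())               # tensor(0.) -- the new first position is all zeros
```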
def forward( self, pixel_values: torch.FloatTensor, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], DepthEstimatorOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*): Ground truth depth estimation maps for computing the loss. Examples: ```python >>> from transformers import AutoImageProcessor, ZoeDepthForDepthEstimation >>> import torch >>> import numpy as np >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("Intel/zoedepth-nyu-kitti") >>> model = ZoeDepthForDepthEstimation.from_pretrained("Intel/zoedepth-nyu-kitti") >>> # prepare image for the model >>> inputs = image_processor(images=image, return_tensors="pt") >>> with torch.no_grad(): ... outputs = model(**inputs) >>> # interpolate to original size >>> post_processed_output = image_processor.post_process_depth_estimation( ... outputs, ... source_sizes=[(image.height, image.width)], ... ) >>> # visualize the prediction >>> predicted_depth = post_processed_output[0]["predicted_depth"] >>> depth = predicted_depth * 255 / predicted_depth.max() >>> depth = depth.detach().cpu().numpy() >>> depth = Image.fromarray(depth.astype("uint8")) ```""" loss = None if labels is not None: raise NotImplementedError("Training is not implemented yet") return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions outputs = self.backbone.forward_with_filtered_kwargs( pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions ) hidden_states = outputs.feature_maps _, _, height, width = pixel_values.shape patch_size = self.patch_size patch_height = height // patch_size patch_width = width // patch_size hidden_states, features = self.neck(hidden_states, patch_height, patch_width) out = [features] + hidden_states relative_depth, features = self.relative_head(hidden_states) out = [features] + out metric_depth, domain_logits = self.metric_head( outconv_activation=out[0], bottleneck=out[1], feature_blocks=out[2:], relative_depth=relative_depth ) metric_depth = metric_depth.squeeze(dim=1) if not return_dict: if domain_logits is not None: output = (metric_depth, domain_logits) + outputs[1:] else: output = (metric_depth,) + outputs[1:] return ((loss,) + output) if loss is not None else output return ZoeDepthDepthEstimatorOutput( loss=loss, predicted_depth=metric_depth, domain_logits=domain_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*): Ground truth depth estimation maps for computing the loss. Examples: ```python >>> from transformers import AutoImageProcessor, ZoeDepthForDepthEstimation >>> import torch >>> import numpy as np >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("Intel/zoedepth-nyu-kitti") >>> model = ZoeDepthForDepthEstimation.from_pretrained("Intel/zoedepth-nyu-kitti") >>> # prepare image for the model >>> inputs = image_processor(images=image, return_tensors="pt") >>> with torch.no_grad(): ... outputs = model(**inputs) >>> # interpolate to original size >>> post_processed_output = image_processor.post_process_depth_estimation( ... outputs, ... source_sizes=[(image.height, image.width)], ... ) >>> # visualize the prediction >>> predicted_depth = post_processed_output[0]["predicted_depth"] >>> depth = predicted_depth * 255 / predicted_depth.max() >>> depth = depth.detach().cpu().numpy() >>> depth = Image.fromarray(depth.astype("uint8")) ```
forward
python
huggingface/transformers
src/transformers/models/zoedepth/modeling_zoedepth.py
https://github.com/huggingface/transformers/blob/master/src/transformers/models/zoedepth/modeling_zoedepth.py
Apache-2.0
def values_override(self) -> Optional[Mapping[str, Any]]: """ Dictionary of keys to override in the model's config before exporting Returns: Dictionary with the keys (and their corresponding values) to override """ if hasattr(self._config, "use_cache"): return {"use_cache": False} return None
Dictionary of keys to override in the model's config before exporting Returns: Dictionary with the keys (and their corresponding values) to override
values_override
python
huggingface/transformers
src/transformers/onnx/config.py
https://github.com/huggingface/transformers/blob/master/src/transformers/onnx/config.py
Apache-2.0
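A hedged sketch of what this hook does in practice: `OnnxConfig` itself is abstract, so the `ToyOnnxConfig` subclass below is a hypothetical stand-in whose only purpose is to show the `use_cache` override being read and applied to a model config, mirroring the loop the exporters run before tracing.

```python
from collections import OrderedDict
from typing import Mapping

from transformers import AutoConfig
from transformers.onnx import OnnxConfig


class ToyOnnxConfig(OnnxConfig):
    """Hypothetical minimal subclass; `inputs` is the only member that must be defined."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("input_ids", {0: "batch", 1: "sequence"})])


config = AutoConfig.from_pretrained("gpt2")  # gpt2's config has a `use_cache` attribute
onnx_config = ToyOnnxConfig(config)

print(onnx_config.values_override)  # {'use_cache': False}

# The exporters apply these overrides before tracing, so the traced graph
# does not return past key/values.
for key, value in (onnx_config.values_override or {}).items():
    setattr(config, key, value)
```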
def is_torch_support_available(self) -> bool: """ The minimum PyTorch version required to export the model. Returns: `bool`: Whether the installed version of PyTorch is compatible with the model. """ if is_torch_available(): from transformers.utils import get_torch_version return version.parse(get_torch_version()) >= self.torch_onnx_minimum_version else: return False
The minimum PyTorch version required to export the model. Returns: `bool`: Whether the installed version of PyTorch is compatible with the model.
is_torch_support_available
python
huggingface/transformers
src/transformers/onnx/config.py
https://github.com/huggingface/transformers/blob/master/src/transformers/onnx/config.py
Apache-2.0
def use_external_data_format(num_parameters: int) -> bool: """ Flag indicating if the model requires using external data format Args: num_parameters: Number of parameter on the model Returns: True if model.num_parameters() * size_of(float32) >= 2Gb False otherwise """ return ( compute_serialized_parameters_size(num_parameters, ParameterFormat.Float) >= EXTERNAL_DATA_FORMAT_SIZE_LIMIT )
Flag indicating if the model requires using external data format Args: num_parameters: Number of parameter on the model Returns: True if model.num_parameters() * size_of(float32) >= 2Gb False otherwise
use_external_data_format
python
huggingface/transformers
src/transformers/onnx/config.py
https://github.com/huggingface/transformers/blob/master/src/transformers/onnx/config.py
Apache-2.0
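A quick worked example of the 2 GB cutoff: at 4 bytes per float32 parameter, the switch to external data happens at roughly 536.9 million parameters. The call below assumes `use_external_data_format` is exposed as a static method on `OnnxConfig`, as in this module.

```python
from transformers.onnx import OnnxConfig

# 2 GB limit / 4 bytes per float32 parameter ~ 536_870_912 parameters.
for num_parameters in (124_000_000, 600_000_000):
    print(num_parameters, OnnxConfig.use_external_data_format(num_parameters))
# 124_000_000 -> False (fits in a single ONNX protobuf)
# 600_000_000 -> True  (weights must be saved as external data next to the .onnx file)
```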
def generate_dummy_inputs( self, preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin", "ImageProcessingMixin"], batch_size: int = -1, seq_length: int = -1, num_choices: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, num_channels: int = 3, image_width: int = 40, image_height: int = 40, sampling_rate: int = 22050, time_duration: float = 5.0, frequency: int = 220, tokenizer: Optional["PreTrainedTokenizerBase"] = None, ) -> Mapping[str, Any]: """ Generate inputs to provide to the ONNX exporter for the specific framework Args: preprocessor: ([`PreTrainedTokenizerBase`], [`FeatureExtractionMixin`], or [`ImageProcessingMixin`]): The preprocessor associated with this model configuration. batch_size (`int`, *optional*, defaults to -1): The batch size to export the model for (-1 means dynamic axis). num_choices (`int`, *optional*, defaults to -1): The number of candidate answers provided for multiple choice task (-1 means dynamic axis). seq_length (`int`, *optional*, defaults to -1): The sequence length to export the model for (-1 means dynamic axis). is_pair (`bool`, *optional*, defaults to `False`): Indicate if the input is a pair (sentence 1, sentence 2) framework (`TensorType`, *optional*, defaults to `None`): The framework (PyTorch or TensorFlow) that the tokenizer will generate tensors for. num_channels (`int`, *optional*, defaults to 3): The number of channels of the generated images. image_width (`int`, *optional*, defaults to 40): The width of the generated images. image_height (`int`, *optional*, defaults to 40): The height of the generated images. sampling_rate (`int`, *optional* defaults to 22050) The sampling rate for audio data generation. time_duration (`float`, *optional* defaults to 5.0) Total seconds of sampling for audio data generation. frequency (`int`, *optional* defaults to 220) The desired natural frequency of generated audio. Returns: Mapping[str, Tensor] holding the kwargs to provide to the model's forward function """ from ..feature_extraction_utils import FeatureExtractionMixin from ..image_processing_utils import ImageProcessingMixin from ..tokenization_utils_base import PreTrainedTokenizerBase if isinstance(preprocessor, PreTrainedTokenizerBase) and tokenizer is not None: raise ValueError("You cannot provide both a tokenizer and a preprocessor to generate dummy inputs.") if tokenizer is not None: warnings.warn( "The `tokenizer` argument is deprecated and will be removed in version 5 of Transformers. Use" " `preprocessor` instead.", FutureWarning, ) logger.warning("Overwriting the `preprocessor` argument with `tokenizer` to generate dummy inputs.") preprocessor = tokenizer if isinstance(preprocessor, PreTrainedTokenizerBase): # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX batch_size = compute_effective_axis_dimension( batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX token_to_add = preprocessor.num_special_tokens_to_add(is_pair) seq_length = compute_effective_axis_dimension( seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add ) # Generate dummy inputs according to compute batch and sequence input_token = ( preprocessor.unk_token if (preprocessor.unk_token is not None and len(preprocessor.unk_token) > 0) else "0" ) dummy_input = [" ".join([input_token]) * seq_length] * batch_size if self.task == "multiple-choice": # If dynamic axis (-1) we forward with a fixed dimension of 4 candidate answers to avoid optimizations # made by ONNX num_choices = compute_effective_axis_dimension( num_choices, fixed_dimension=OnnxConfig.default_fixed_num_choices, num_token_to_add=0 ) dummy_input = dummy_input * num_choices # The shape of the tokenized inputs values is [batch_size * num_choices, seq_length] tokenized_input = preprocessor(dummy_input, text_pair=dummy_input) # Unflatten the tokenized inputs values expanding it to the shape [batch_size, num_choices, seq_length] for k, v in tokenized_input.items(): tokenized_input[k] = [v[i : i + num_choices] for i in range(0, len(v), num_choices)] return dict(tokenized_input.convert_to_tensors(tensor_type=framework)) return dict(preprocessor(dummy_input, return_tensors=framework)) elif isinstance(preprocessor, ImageProcessingMixin): if preprocessor.model_input_names[0] != "pixel_values": raise ValueError( f"The `preprocessor` is an image processor ({preprocessor.__class__.__name__}) and expects" f' `model_input_names[0]` to be "pixel_values", but got {preprocessor.model_input_names[0]}' ) # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch) dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width) return dict(preprocessor(images=dummy_input, return_tensors=framework)) elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values": # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch) dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width) return dict(preprocessor(images=dummy_input, return_tensors=framework)) elif ( isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "input_features" ): # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch) dummy_input = self._generate_dummy_audio(batch_size, sampling_rate, time_duration, frequency) return dict(preprocessor(dummy_input, return_tensors=framework)) else: raise ValueError( "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor." )
Generate inputs to provide to the ONNX exporter for the specific framework Args: preprocessor: ([`PreTrainedTokenizerBase`], [`FeatureExtractionMixin`], or [`ImageProcessingMixin`]): The preprocessor associated with this model configuration. batch_size (`int`, *optional*, defaults to -1): The batch size to export the model for (-1 means dynamic axis). num_choices (`int`, *optional*, defaults to -1): The number of candidate answers provided for multiple choice task (-1 means dynamic axis). seq_length (`int`, *optional*, defaults to -1): The sequence length to export the model for (-1 means dynamic axis). is_pair (`bool`, *optional*, defaults to `False`): Indicate if the input is a pair (sentence 1, sentence 2) framework (`TensorType`, *optional*, defaults to `None`): The framework (PyTorch or TensorFlow) that the tokenizer will generate tensors for. num_channels (`int`, *optional*, defaults to 3): The number of channels of the generated images. image_width (`int`, *optional*, defaults to 40): The width of the generated images. image_height (`int`, *optional*, defaults to 40): The height of the generated images. sampling_rate (`int`, *optional* defaults to 22050) The sampling rate for audio data generation. time_duration (`float`, *optional* defaults to 5.0) Total seconds of sampling for audio data generation. frequency (`int`, *optional* defaults to 220) The desired natural frequency of generated audio. Returns: Mapping[str, Tensor] holding the kwargs to provide to the model's forward function
generate_dummy_inputs
python
huggingface/transformers
src/transformers/onnx/config.py
https://github.com/huggingface/transformers/blob/master/src/transformers/onnx/config.py
Apache-2.0
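A hedged usage sketch for this method: the checkpoint and feature below are illustrative choices, and the concrete `OnnxConfig` is resolved through `FeaturesManager` rather than built by hand. With the default `-1` sizes, small fixed fallback batch and sequence dimensions are substituted.

```python
from transformers import AutoConfig, AutoTokenizer, TensorType
from transformers.onnx.features import FeaturesManager

model_name = "distilbert-base-uncased"  # illustrative checkpoint
config = AutoConfig.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Look up the OnnxConfig constructor registered for the "default" feature.
onnx_config_ctor = FeaturesManager.get_supported_features_for_model_type("distilbert")["default"]
onnx_config = onnx_config_ctor(config)

# batch_size / seq_length default to -1 ("dynamic axis"), so fixed fallback sizes
# are used to keep ONNX from specializing the traced graph.
dummy_inputs = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
print({name: tuple(tensor.shape) for name, tensor in dummy_inputs.items()})
# e.g. {'input_ids': (2, 8), 'attention_mask': (2, 8)}
```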
def flatten_output_collection_property(cls, name: str, field: Iterable[Any]) -> Dict[str, Any]: """ Flatten any potential nested structure expanding the name of the field with the index of the element within the structure. Args: name: The name of the nested structure field: The structure to, potentially, be flattened Returns: (Dict[str, Any]): Outputs with flattened structure and key mapping this new structure. """ from itertools import chain return {f"{name}.{idx}": item for idx, item in enumerate(chain.from_iterable(field))}
Flatten any potential nested structure expanding the name of the field with the index of the element within the structure. Args: name: The name of the nested structure field: The structure to, potentially, be flattened Returns: (Dict[str, Any]): Outputs with flattened structure and key mapping this new structure.
flatten_output_collection_property
python
huggingface/transformers
src/transformers/onnx/config.py
https://github.com/huggingface/transformers/blob/master/src/transformers/onnx/config.py
Apache-2.0
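A tiny illustration of the base-class behaviour, with placeholder strings standing in for tensors: the nested structure is chained and enumerated into dotted names (the past-key-values variant overrides this to emit `.key`/`.value` suffixes instead).

```python
from transformers.onnx import OnnxConfig

# Two "layers", each a (key, value) pair of placeholder strings.
field = [("k0", "v0"), ("k1", "v1")]
print(OnnxConfig.flatten_output_collection_property("present", field))
# {'present.0': 'k0', 'present.1': 'v0', 'present.2': 'k1', 'present.3': 'v1'}
```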
def num_layers(self) -> int: """ The number of layers attribute retrieved from the model config. Override this for model configs where the number of layers attribute is not called `num_layers`. """ if not hasattr(self._config, "num_layers"): raise AttributeError( "could not find the number of layers attribute in the model configuration, override the num_layers" " property of the model OnnxConfig to solve this" ) return self._config.num_layers
The number of layers attribute retrieved from the model config. Override this for model configs where the number of layers attribute is not called `num_layers`.
num_layers
python
huggingface/transformers
src/transformers/onnx/config.py
https://github.com/huggingface/transformers/blob/master/src/transformers/onnx/config.py
Apache-2.0
def num_attention_heads(self) -> int: """ The number of attention heads attribute retrieved from the model config. Override this for model configs where the number of attention heads attribute is not called `num_attention_heads`. """ if not hasattr(self._config, "num_attention_heads"): raise AttributeError( "could not find the number of attention heads attribute in the model configuration, override the" " num_attention_heads property of the model OnnxConfig to solve this" ) return self._config.num_attention_heads
The number of attention heads attribute retrieved from the model config. Override this for model configs where the number of attention heads attribute is not called `num_attention_heads`.
num_attention_heads
python
huggingface/transformers
src/transformers/onnx/config.py
https://github.com/huggingface/transformers/blob/master/src/transformers/onnx/config.py
Apache-2.0
def fill_with_past_key_values_( self, inputs_or_outputs: Mapping[str, Mapping[int, str]], direction: str, inverted_values_shape: bool = False ): """ Fill the input_or_outputs mapping with past_key_values dynamic axes considering. Args: inputs_or_outputs: The mapping to fill. direction: either "inputs" or "outputs", it specifies whether input_or_outputs is the input mapping or the output mapping, this is important for axes naming. inverted_values_shape: If `True`, store values on dynamic axis 1, else on axis 2. """ if direction not in ["inputs", "outputs"]: raise ValueError(f'direction must either be "inputs" or "outputs", but {direction} was given') name = "past_key_values" if direction == "inputs" else "present" for i in range(self.num_layers): inputs_or_outputs[f"{name}.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"} if inverted_values_shape: inputs_or_outputs[f"{name}.{i}.value"] = {0: "batch", 1: "past_sequence + sequence"} else: inputs_or_outputs[f"{name}.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
Fill the input_or_outputs mapping with past_key_values dynamic axes considering. Args: inputs_or_outputs: The mapping to fill. direction: either "inputs" or "outputs", it specifies whether input_or_outputs is the input mapping or the output mapping, this is important for axes naming. inverted_values_shape: If `True`, store values on dynamic axis 1, else on axis 2.
fill_with_past_key_values_
python
huggingface/transformers
src/transformers/onnx/config.py
https://github.com/huggingface/transformers/blob/master/src/transformers/onnx/config.py
Apache-2.0
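A hedged sketch of the axes this helper adds, using GPT-2's with-past ONNX config as the example; the checkpoint and the "causal-lm-with-past" feature name are illustrative, and any `OnnxConfigWithPast` subclass should behave the same way.

```python
from collections import OrderedDict

from transformers import AutoConfig
from transformers.onnx.features import FeaturesManager

config = AutoConfig.from_pretrained("gpt2")
ctor = FeaturesManager.get_supported_features_for_model_type("gpt2")["causal-lm-with-past"]
onnx_config = ctor(config)

inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
onnx_config.fill_with_past_key_values_(inputs, direction="inputs")

# One key/value entry per layer, each with a dynamic batch axis and a combined
# "past_sequence + sequence" axis on dim 2 (dim 1 when inverted_values_shape=True).
print(list(inputs)[:3])
# ['input_ids', 'past_key_values.0.key', 'past_key_values.0.value']
```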
def check_onnxruntime_requirements(minimum_version: Version): """ Check onnxruntime is installed and if the installed version match is recent enough Raises: ImportError: If onnxruntime is not installed or too old version is found """ try: import onnxruntime # Parse the version of the installed onnxruntime ort_version = parse(onnxruntime.__version__) # We require 1.4.0 minimum if ort_version < ORT_QUANTIZE_MINIMUM_VERSION: raise ImportError( f"We found an older version of onnxruntime ({onnxruntime.__version__}) " f"but we require onnxruntime to be >= {minimum_version} to enable all the conversions options.\n" "Please update onnxruntime by running `pip install --upgrade onnxruntime`" ) except ImportError: raise ImportError( "onnxruntime doesn't seem to be currently installed. " "Please install the onnxruntime by running `pip install onnxruntime`" " and relaunch the conversion." )
Check onnxruntime is installed and if the installed version match is recent enough Raises: ImportError: If onnxruntime is not installed or too old version is found
check_onnxruntime_requirements
python
huggingface/transformers
src/transformers/onnx/convert.py
https://github.com/huggingface/transformers/blob/master/src/transformers/onnx/convert.py
Apache-2.0
def export_pytorch( preprocessor: Union["PreTrainedTokenizer", "FeatureExtractionMixin", "ProcessorMixin"], model: "PreTrainedModel", config: OnnxConfig, opset: int, output: Path, tokenizer: Optional["PreTrainedTokenizer"] = None, device: str = "cpu", ) -> Tuple[List[str], List[str]]: """ Export a PyTorch model to an ONNX Intermediate Representation (IR) Args: preprocessor: ([`PreTrainedTokenizer`], [`FeatureExtractionMixin`] or [`ProcessorMixin`]): The preprocessor used for encoding the data. model ([`PreTrainedModel`]): The model to export. config ([`~onnx.config.OnnxConfig`]): The ONNX configuration associated with the exported model. opset (`int`): The version of the ONNX operator set to use. output (`Path`): Directory to store the exported ONNX model. device (`str`, *optional*, defaults to `cpu`): The device on which the ONNX model will be exported. Either `cpu` or `cuda`. Returns: `Tuple[List[str], List[str]]`: A tuple with an ordered list of the model's inputs, and the named inputs from the ONNX configuration. """ if isinstance(preprocessor, PreTrainedTokenizerBase) and tokenizer is not None: raise ValueError("You cannot provide both a tokenizer and a preprocessor to export the model.") if tokenizer is not None: warnings.warn( "The `tokenizer` argument is deprecated and will be removed in version 5 of Transformers. Use" " `preprocessor` instead.", FutureWarning, ) logger.info("Overwriting the `preprocessor` argument with `tokenizer` to generate dummy inputs.") preprocessor = tokenizer if issubclass(type(model), PreTrainedModel): import torch from torch.onnx import export as onnx_export logger.info(f"Using framework PyTorch: {torch.__version__}") with torch.no_grad(): model.config.return_dict = True model.eval() # Check if we need to override certain configuration item if config.values_override is not None: logger.info(f"Overriding {len(config.values_override)} configuration item(s)") for override_config_key, override_config_value in config.values_override.items(): logger.info(f"\t- {override_config_key} -> {override_config_value}") setattr(model.config, override_config_key, override_config_value) # Ensure inputs match # TODO: Check when exporting QA we provide "is_pair=True" model_inputs = config.generate_dummy_inputs(preprocessor, framework=TensorType.PYTORCH) device = torch.device(device) if device.type == "cuda" and torch.cuda.is_available(): model.to(device) model_inputs_device = {} for k, v in model_inputs.items(): if isinstance(v, Tuple): model_inputs_device[k] = tuple( x.to(device) if isinstance(x, torch.Tensor) else None for x in v ) elif isinstance(v, List): model_inputs_device[k] = [ tuple(x.to(device) if isinstance(x, torch.Tensor) else None for x in t) for t in v ] else: model_inputs_device[k] = v.to(device) model_inputs = model_inputs_device inputs_match, matched_inputs = ensure_model_and_config_inputs_match(model, model_inputs.keys()) onnx_outputs = list(config.outputs.keys()) if not inputs_match: raise ValueError("Model and config inputs doesn't match") config.patch_ops() onnx_export( model, (model_inputs,), f=output.as_posix(), input_names=list(config.inputs.keys()), output_names=onnx_outputs, dynamic_axes=dict(chain(config.inputs.items(), config.outputs.items())), do_constant_folding=True, opset_version=opset, ) config.restore_ops() return matched_inputs, onnx_outputs
Export a PyTorch model to an ONNX Intermediate Representation (IR) Args: preprocessor: ([`PreTrainedTokenizer`], [`FeatureExtractionMixin`] or [`ProcessorMixin`]): The preprocessor used for encoding the data. model ([`PreTrainedModel`]): The model to export. config ([`~onnx.config.OnnxConfig`]): The ONNX configuration associated with the exported model. opset (`int`): The version of the ONNX operator set to use. output (`Path`): Directory to store the exported ONNX model. device (`str`, *optional*, defaults to `cpu`): The device on which the ONNX model will be exported. Either `cpu` or `cuda`. Returns: `Tuple[List[str], List[str]]`: A tuple with an ordered list of the model's inputs, and the named inputs from the ONNX configuration.
export_pytorch
python
huggingface/transformers
src/transformers/onnx/convert.py
https://github.com/huggingface/transformers/blob/master/src/transformers/onnx/convert.py
Apache-2.0
def export_tensorflow( preprocessor: Union["PreTrainedTokenizer", "FeatureExtractionMixin"], model: "TFPreTrainedModel", config: OnnxConfig, opset: int, output: Path, tokenizer: Optional["PreTrainedTokenizer"] = None, ) -> Tuple[List[str], List[str]]: """ Export a TensorFlow model to an ONNX Intermediate Representation (IR) Args: preprocessor: ([`PreTrainedTokenizer`] or [`FeatureExtractionMixin`]): The preprocessor used for encoding the data. model ([`TFPreTrainedModel`]): The model to export. config ([`~onnx.config.OnnxConfig`]): The ONNX configuration associated with the exported model. opset (`int`): The version of the ONNX operator set to use. output (`Path`): Directory to store the exported ONNX model. Returns: `Tuple[List[str], List[str]]`: A tuple with an ordered list of the model's inputs, and the named inputs from the ONNX configuration. """ import onnx import tensorflow as tf import tf2onnx if isinstance(preprocessor, PreTrainedTokenizerBase) and tokenizer is not None: raise ValueError("You cannot provide both a tokenizer and preprocessor to export the model.") if tokenizer is not None: warnings.warn( "The `tokenizer` argument is deprecated and will be removed in version 5 of Transformers. Use" " `preprocessor` instead.", FutureWarning, ) logger.info("Overwriting the `preprocessor` argument with `tokenizer` to generate dummy inputs.") preprocessor = tokenizer model.config.return_dict = True # Check if we need to override certain configuration item if config.values_override is not None: logger.info(f"Overriding {len(config.values_override)} configuration item(s)") for override_config_key, override_config_value in config.values_override.items(): logger.info(f"\t- {override_config_key} -> {override_config_value}") setattr(model.config, override_config_key, override_config_value) # Ensure inputs match model_inputs = config.generate_dummy_inputs(preprocessor, framework=TensorType.TENSORFLOW) inputs_match, matched_inputs = ensure_model_and_config_inputs_match(model, model_inputs.keys()) onnx_outputs = list(config.outputs.keys()) input_signature = [ tf.TensorSpec([None] * tensor.ndim, dtype=tensor.dtype, name=key) for key, tensor in model_inputs.items() ] onnx_model, _ = tf2onnx.convert.from_keras(model, input_signature, opset=opset) onnx.save(onnx_model, output.as_posix()) config.restore_ops() return matched_inputs, onnx_outputs
Export a TensorFlow model to an ONNX Intermediate Representation (IR) Args: preprocessor: ([`PreTrainedTokenizer`] or [`FeatureExtractionMixin`]): The preprocessor used for encoding the data. model ([`TFPreTrainedModel`]): The model to export. config ([`~onnx.config.OnnxConfig`]): The ONNX configuration associated with the exported model. opset (`int`): The version of the ONNX operator set to use. output (`Path`): Directory to store the exported ONNX model. Returns: `Tuple[List[str], List[str]]`: A tuple with an ordered list of the model's inputs, and the named inputs from the ONNX configuration.
export_tensorflow
python
huggingface/transformers
src/transformers/onnx/convert.py
https://github.com/huggingface/transformers/blob/master/src/transformers/onnx/convert.py
Apache-2.0
def export( preprocessor: Union["PreTrainedTokenizer", "FeatureExtractionMixin", "ProcessorMixin"], model: Union["PreTrainedModel", "TFPreTrainedModel"], config: OnnxConfig, opset: int, output: Path, tokenizer: Optional["PreTrainedTokenizer"] = None, device: str = "cpu", ) -> Tuple[List[str], List[str]]: """ Export a Pytorch or TensorFlow model to an ONNX Intermediate Representation (IR) Args: preprocessor: ([`PreTrainedTokenizer`], [`FeatureExtractionMixin`] or [`ProcessorMixin`]): The preprocessor used for encoding the data. model ([`PreTrainedModel`] or [`TFPreTrainedModel`]): The model to export. config ([`~onnx.config.OnnxConfig`]): The ONNX configuration associated with the exported model. opset (`int`): The version of the ONNX operator set to use. output (`Path`): Directory to store the exported ONNX model. device (`str`, *optional*, defaults to `cpu`): The device on which the ONNX model will be exported. Either `cpu` or `cuda`. Only PyTorch is supported for export on CUDA devices. Returns: `Tuple[List[str], List[str]]`: A tuple with an ordered list of the model's inputs, and the named inputs from the ONNX configuration. """ if not (is_torch_available() or is_tf_available()): raise ImportError( "Cannot convert because neither PyTorch nor TensorFlow are not installed. " "Please install torch or tensorflow first." ) if is_tf_available() and isinstance(model, TFPreTrainedModel) and device == "cuda": raise RuntimeError("`tf2onnx` does not support export on CUDA device.") if isinstance(preprocessor, PreTrainedTokenizerBase) and tokenizer is not None: raise ValueError("You cannot provide both a tokenizer and a preprocessor to export the model.") if tokenizer is not None: warnings.warn( "The `tokenizer` argument is deprecated and will be removed in version 5 of Transformers. Use" " `preprocessor` instead.", FutureWarning, ) logger.info("Overwriting the `preprocessor` argument with `tokenizer` to generate dummy inputs.") preprocessor = tokenizer if is_torch_available(): from ..utils import get_torch_version if not config.is_torch_support_available: logger.warning( f"Unsupported PyTorch version for this model. Minimum required is {config.torch_onnx_minimum_version}," f" got: {get_torch_version()}" ) if is_torch_available() and issubclass(type(model), PreTrainedModel): return export_pytorch(preprocessor, model, config, opset, output, tokenizer=tokenizer, device=device) elif is_tf_available() and issubclass(type(model), TFPreTrainedModel): return export_tensorflow(preprocessor, model, config, opset, output, tokenizer=tokenizer)
Export a Pytorch or TensorFlow model to an ONNX Intermediate Representation (IR) Args: preprocessor: ([`PreTrainedTokenizer`], [`FeatureExtractionMixin`] or [`ProcessorMixin`]): The preprocessor used for encoding the data. model ([`PreTrainedModel`] or [`TFPreTrainedModel`]): The model to export. config ([`~onnx.config.OnnxConfig`]): The ONNX configuration associated with the exported model. opset (`int`): The version of the ONNX operator set to use. output (`Path`): Directory to store the exported ONNX model. device (`str`, *optional*, defaults to `cpu`): The device on which the ONNX model will be exported. Either `cpu` or `cuda`. Only PyTorch is supported for export on CUDA devices. Returns: `Tuple[List[str], List[str]]`: A tuple with an ordered list of the model's inputs, and the named inputs from the ONNX configuration.
export
python
huggingface/transformers
src/transformers/onnx/convert.py
https://github.com/huggingface/transformers/blob/master/src/transformers/onnx/convert.py
Apache-2.0
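A hedged end-to-end sketch tying the export entry point together with the pieces above; the checkpoint, feature and output filename are illustrative, and `default_onnx_opset` is the opset suggested by the resolved config.

```python
from pathlib import Path

from transformers import AutoModel, AutoTokenizer
from transformers.onnx import export
from transformers.onnx.features import FeaturesManager

model_name = "distilbert-base-uncased"  # illustrative checkpoint
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModel.from_pretrained(model_name)

# Validate the (model, feature) pair and fetch the matching OnnxConfig constructor.
model_kind, onnx_config_ctor = FeaturesManager.check_supported_model_or_raise(model, feature="default")
onnx_config = onnx_config_ctor(model.config)

onnx_inputs, onnx_outputs = export(
    preprocessor=tokenizer,
    model=model,
    config=onnx_config,
    opset=onnx_config.default_onnx_opset,
    output=Path("model.onnx"),
)
print(onnx_inputs, onnx_outputs)  # matched input names and configured output names
```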
def supported_features_mapping( *supported_features: str, onnx_config_cls: Optional[str] = None ) -> Dict[str, Callable[[PretrainedConfig], OnnxConfig]]: """ Generate the mapping between supported the features and their corresponding OnnxConfig for a given model. Args: *supported_features: The names of the supported features. onnx_config_cls: The OnnxConfig full name corresponding to the model. Returns: The dictionary mapping a feature to an OnnxConfig constructor. """ if onnx_config_cls is None: raise ValueError("A OnnxConfig class must be provided") config_cls = transformers for attr_name in onnx_config_cls.split("."): config_cls = getattr(config_cls, attr_name) mapping = {} for feature in supported_features: if "-with-past" in feature: task = feature.replace("-with-past", "") mapping[feature] = partial(config_cls.with_past, task=task) else: mapping[feature] = partial(config_cls.from_model_config, task=feature) return mapping
Generate the mapping between supported the features and their corresponding OnnxConfig for a given model. Args: *supported_features: The names of the supported features. onnx_config_cls: The OnnxConfig full name corresponding to the model. Returns: The dictionary mapping a feature to an OnnxConfig constructor.
supported_features_mapping
python
huggingface/transformers
src/transformers/onnx/features.py
https://github.com/huggingface/transformers/blob/master/src/transformers/onnx/features.py
Apache-2.0
def get_supported_features_for_model_type( model_type: str, model_name: Optional[str] = None ) -> Dict[str, Callable[[PretrainedConfig], OnnxConfig]]: """ Tries to retrieve the feature -> OnnxConfig constructor map from the model type. Args: model_type (`str`): The model type to retrieve the supported features for. model_name (`str`, *optional*): The name attribute of the model object, only used for the exception message. Returns: The dictionary mapping each feature to a corresponding OnnxConfig constructor. """ model_type = model_type.lower() if model_type not in FeaturesManager._SUPPORTED_MODEL_TYPE: model_type_and_model_name = f"{model_type} ({model_name})" if model_name else model_type raise KeyError( f"{model_type_and_model_name} is not supported yet. " f"Only {list(FeaturesManager._SUPPORTED_MODEL_TYPE.keys())} are supported. " f"If you want to support {model_type} please propose a PR or open up an issue." ) return FeaturesManager._SUPPORTED_MODEL_TYPE[model_type]
Tries to retrieve the feature -> OnnxConfig constructor map from the model type. Args: model_type (`str`): The model type to retrieve the supported features for. model_name (`str`, *optional*): The name attribute of the model object, only used for the exception message. Returns: The dictionary mapping each feature to a corresponding OnnxConfig constructor.
get_supported_features_for_model_type
python
huggingface/transformers
src/transformers/onnx/features.py
https://github.com/huggingface/transformers/blob/master/src/transformers/onnx/features.py
Apache-2.0
def _validate_framework_choice(framework: str): """ Validates if the framework requested for the export is both correct and available, otherwise throws an exception. """ if framework not in ["pt", "tf"]: raise ValueError( f"Only two frameworks are supported for ONNX export: pt or tf, but {framework} was provided." ) elif framework == "pt" and not is_torch_available(): raise RuntimeError("Cannot export model to ONNX using PyTorch because no PyTorch package was found.") elif framework == "tf" and not is_tf_available(): raise RuntimeError("Cannot export model to ONNX using TensorFlow because no TensorFlow package was found.")
Validates if the framework requested for the export is both correct and available, otherwise throws an exception.
_validate_framework_choice
python
huggingface/transformers
src/transformers/onnx/features.py
https://github.com/huggingface/transformers/blob/master/src/transformers/onnx/features.py
Apache-2.0
def get_model_class_for_feature(feature: str, framework: str = "pt") -> Type: """ Attempts to retrieve an AutoModel class from a feature name. Args: feature (`str`): The feature required. framework (`str`, *optional*, defaults to `"pt"`): The framework to use for the export. Returns: The AutoModel class corresponding to the feature. """ task = FeaturesManager.feature_to_task(feature) FeaturesManager._validate_framework_choice(framework) if framework == "pt": task_to_automodel = FeaturesManager._TASKS_TO_AUTOMODELS else: task_to_automodel = FeaturesManager._TASKS_TO_TF_AUTOMODELS if task not in task_to_automodel: raise KeyError( f"Unknown task: {feature}. Possible values are {list(FeaturesManager._TASKS_TO_AUTOMODELS.values())}" ) return task_to_automodel[task]
Attempts to retrieve an AutoModel class from a feature name. Args: feature (`str`): The feature required. framework (`str`, *optional*, defaults to `"pt"`): The framework to use for the export. Returns: The AutoModel class corresponding to the feature.
get_model_class_for_feature
python
huggingface/transformers
src/transformers/onnx/features.py
https://github.com/huggingface/transformers/blob/master/src/transformers/onnx/features.py
Apache-2.0
def determine_framework(model: str, framework: Optional[str] = None) -> str: """ Determines the framework to use for the export. The priority is in the following order: 1. User input via `framework`. 2. If local checkpoint is provided, use the same framework as the checkpoint. 3. Available framework in environment, with priority given to PyTorch Args: model (`str`): The name of the model to export. framework (`str`, *optional*, defaults to `None`): The framework to use for the export. See above for priority if none provided. Returns: The framework to use for the export. """ if framework is not None: return framework framework_map = {"pt": "PyTorch", "tf": "TensorFlow"} exporter_map = {"pt": "torch", "tf": "tf2onnx"} if os.path.isdir(model): if os.path.isfile(os.path.join(model, WEIGHTS_NAME)): framework = "pt" elif os.path.isfile(os.path.join(model, TF2_WEIGHTS_NAME)): framework = "tf" else: raise FileNotFoundError( "Cannot determine framework from given checkpoint location." f" There should be a {WEIGHTS_NAME} for PyTorch" f" or {TF2_WEIGHTS_NAME} for TensorFlow." ) logger.info(f"Local {framework_map[framework]} model found.") else: if is_torch_available(): framework = "pt" elif is_tf_available(): framework = "tf" else: raise OSError("Neither PyTorch nor TensorFlow found in environment. Cannot export to ONNX.") logger.info(f"Framework not requested. Using {exporter_map[framework]} to export to ONNX.") return framework
Determines the framework to use for the export. The priority is in the following order: 1. User input via `framework`. 2. If local checkpoint is provided, use the same framework as the checkpoint. 3. Available framework in environment, with priority given to PyTorch Args: model (`str`): The name of the model to export. framework (`str`, *optional*, defaults to `None`): The framework to use for the export. See above for priority if none provided. Returns: The framework to use for the export.
determine_framework
python
huggingface/transformers
src/transformers/onnx/features.py
https://github.com/huggingface/transformers/blob/master/src/transformers/onnx/features.py
Apache-2.0
def get_model_from_feature( feature: str, model: str, framework: Optional[str] = None, cache_dir: Optional[str] = None ) -> Union["PreTrainedModel", "TFPreTrainedModel"]: """ Attempts to retrieve a model from a model's name and the feature to be enabled. Args: feature (`str`): The feature required. model (`str`): The name of the model to export. framework (`str`, *optional*, defaults to `None`): The framework to use for the export. See `FeaturesManager.determine_framework` for the priority should none be provided. Returns: The instance of the model. """ framework = FeaturesManager.determine_framework(model, framework) model_class = FeaturesManager.get_model_class_for_feature(feature, framework) try: model = model_class.from_pretrained(model, cache_dir=cache_dir) except OSError: if framework == "pt": logger.info("Loading TensorFlow model in PyTorch before exporting to ONNX.") model = model_class.from_pretrained(model, from_tf=True, cache_dir=cache_dir) else: logger.info("Loading PyTorch model in TensorFlow before exporting to ONNX.") model = model_class.from_pretrained(model, from_pt=True, cache_dir=cache_dir) return model
Attempts to retrieve a model from a model's name and the feature to be enabled. Args: feature (`str`): The feature required. model (`str`): The name of the model to export. framework (`str`, *optional*, defaults to `None`): The framework to use for the export. See `FeaturesManager.determine_framework` for the priority should none be provided. Returns: The instance of the model.
get_model_from_feature
python
huggingface/transformers
src/transformers/onnx/features.py
https://github.com/huggingface/transformers/blob/master/src/transformers/onnx/features.py
Apache-2.0
def check_supported_model_or_raise( model: Union["PreTrainedModel", "TFPreTrainedModel"], feature: str = "default" ) -> Tuple[str, Callable]: """ Check whether or not the model has the requested features. Args: model: The model to export. feature: The name of the feature to check if it is available. Returns: (str) The type of the model (OnnxConfig) The OnnxConfig instance holding the model export properties. """ model_type = model.config.model_type.replace("_", "-") model_name = getattr(model, "name", "") model_features = FeaturesManager.get_supported_features_for_model_type(model_type, model_name=model_name) if feature not in model_features: raise ValueError( f"{model.config.model_type} doesn't support feature {feature}. Supported values are: {model_features}" ) return model.config.model_type, FeaturesManager._SUPPORTED_MODEL_TYPE[model_type][feature]
Check whether or not the model has the requested features. Args: model: The model to export. feature: The name of the feature to check if it is available. Returns: (str) The type of the model (OnnxConfig) The OnnxConfig instance holding the model export properties.
check_supported_model_or_raise
python
huggingface/transformers
src/transformers/onnx/features.py
https://github.com/huggingface/transformers/blob/master/src/transformers/onnx/features.py
Apache-2.0
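The two `FeaturesManager` helpers above are typically used back to back, much like the `transformers.onnx` command-line entry point does; the feature and checkpoint below are illustrative.

```python
from transformers.onnx.features import FeaturesManager

feature = "sequence-classification"                              # illustrative feature
checkpoint = "distilbert-base-uncased-finetuned-sst-2-english"   # illustrative checkpoint

# Loads the checkpoint with the AutoModel class matching the feature
# (here AutoModelForSequenceClassification), picking PyTorch or TensorFlow as available.
model = FeaturesManager.get_model_from_feature(feature, checkpoint)

# Raises if the architecture does not support the feature; otherwise returns the model
# type and the OnnxConfig constructor to use for the export.
model_kind, onnx_config_ctor = FeaturesManager.check_supported_model_or_raise(model, feature=feature)
onnx_config = onnx_config_ctor(model.config)
print(model_kind, list(onnx_config.outputs))
```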
def get_preprocessor(model_name: str) -> Optional[Union["AutoTokenizer", "AutoFeatureExtractor", "AutoProcessor"]]: """ Gets a preprocessor (tokenizer, feature extractor or processor) that is available for `model_name`. Args: model_name (`str`): Name of the model for which a preprocessor are loaded. Returns: `Optional[Union[AutoTokenizer, AutoFeatureExtractor, AutoProcessor]]`: If a processor is found, it is returned. Otherwise, if a tokenizer or a feature extractor exists, it is returned. If both a tokenizer and a feature extractor exist, an error is raised. The function returns `None` if no preprocessor is found. """ # Avoid circular imports by only importing this here. from .. import AutoFeatureExtractor, AutoProcessor, AutoTokenizer # tests_ignore try: return AutoProcessor.from_pretrained(model_name) except (ValueError, OSError, KeyError): tokenizer, feature_extractor = None, None try: tokenizer = AutoTokenizer.from_pretrained(model_name) except (OSError, KeyError): pass try: feature_extractor = AutoFeatureExtractor.from_pretrained(model_name) except (OSError, KeyError): pass if tokenizer is not None and feature_extractor is not None: raise ValueError( f"Couldn't auto-detect preprocessor for {model_name}. Found both a tokenizer and a feature extractor." ) elif tokenizer is None and feature_extractor is None: return None elif tokenizer is not None: return tokenizer else: return feature_extractor
Gets a preprocessor (tokenizer, feature extractor or processor) that is available for `model_name`. Args: model_name (`str`): Name of the model for which a preprocessor are loaded. Returns: `Optional[Union[AutoTokenizer, AutoFeatureExtractor, AutoProcessor]]`: If a processor is found, it is returned. Otherwise, if a tokenizer or a feature extractor exists, it is returned. If both a tokenizer and a feature extractor exist, an error is raised. The function returns `None` if no preprocessor is found.
get_preprocessor
python
huggingface/transformers
src/transformers/onnx/utils.py
https://github.com/huggingface/transformers/blob/master/src/transformers/onnx/utils.py
Apache-2.0
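A minimal usage sketch (the checkpoint is illustrative): for a text checkpoint the helper resolves to a tokenizer, for a vision checkpoint to an image processor or feature extractor, and it returns `None` when nothing suitable can be loaded.

```python
from transformers.onnx.utils import get_preprocessor

preprocessor = get_preprocessor("distilbert-base-uncased")  # illustrative checkpoint
print(type(preprocessor).__name__)  # a tokenizer class for this checkpoint
```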
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array: """ Helper function to read an audio file through ffmpeg. """ ar = f"{sampling_rate}" ac = "1" format_for_conversion = "f32le" ffmpeg_command = [ "ffmpeg", "-i", "pipe:0", "-ac", ac, "-ar", ar, "-f", format_for_conversion, "-hide_banner", "-loglevel", "quiet", "pipe:1", ] try: ffmpeg_process = subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) except FileNotFoundError: raise ValueError("ffmpeg was not found but is required to load audio files from filename") output_stream = ffmpeg_process.communicate(bpayload) out_bytes = output_stream[0] audio = np.frombuffer(out_bytes, np.float32) if audio.shape[0] == 0: raise ValueError("Malformed soundfile") return audio
Helper function to read an audio file through ffmpeg.
ffmpeg_read
python
huggingface/transformers
src/transformers/pipelines/audio_classification.py
https://github.com/huggingface/transformers/blob/master/src/transformers/pipelines/audio_classification.py
Apache-2.0
def __call__( self, inputs: Union[np.ndarray, bytes, str], **kwargs, ): """ Classify the sequence(s) given as inputs. See the [`AutomaticSpeechRecognitionPipeline`] documentation for more information. Args: inputs (`np.ndarray` or `bytes` or `str` or `dict`): The inputs is either : - `str` that is the filename of the audio file, the file will be read at the correct sampling rate to get the waveform using *ffmpeg*. This requires *ffmpeg* to be installed on the system. - `bytes` it is supposed to be the content of an audio file and is interpreted by *ffmpeg* in the same way. - (`np.ndarray` of shape (n, ) of type `np.float32` or `np.float64`) Raw audio at the correct sampling rate (no further check will be done) - `dict` form can be used to pass raw audio sampled at arbitrary `sampling_rate` and let this pipeline do the resampling. The dict must be either be in the format `{"sampling_rate": int, "raw": np.array}`, or `{"sampling_rate": int, "array": np.array}`, where the key `"raw"` or `"array"` is used to denote the raw audio waveform. top_k (`int`, *optional*, defaults to None): The number of top labels that will be returned by the pipeline. If the provided number is `None` or higher than the number of labels available in the model configuration, it will default to the number of labels. function_to_apply(`str`, *optional*, defaults to "softmax"): The function to apply to the model output. By default, the pipeline will apply the softmax function to the output of the model. Valid options: ["softmax", "sigmoid", "none"]. Note that passing Python's built-in `None` will default to "softmax", so you need to pass the string "none" to disable any post-processing. Return: A list of `dict` with the following keys: - **label** (`str`) -- The label predicted. - **score** (`float`) -- The corresponding probability. """ return super().__call__(inputs, **kwargs)
Classify the sequence(s) given as inputs. See the [`AutomaticSpeechRecognitionPipeline`] documentation for more information. Args: inputs (`np.ndarray` or `bytes` or `str` or `dict`): The inputs is either : - `str` that is the filename of the audio file, the file will be read at the correct sampling rate to get the waveform using *ffmpeg*. This requires *ffmpeg* to be installed on the system. - `bytes` it is supposed to be the content of an audio file and is interpreted by *ffmpeg* in the same way. - (`np.ndarray` of shape (n, ) of type `np.float32` or `np.float64`) Raw audio at the correct sampling rate (no further check will be done) - `dict` form can be used to pass raw audio sampled at arbitrary `sampling_rate` and let this pipeline do the resampling. The dict must be either be in the format `{"sampling_rate": int, "raw": np.array}`, or `{"sampling_rate": int, "array": np.array}`, where the key `"raw"` or `"array"` is used to denote the raw audio waveform. top_k (`int`, *optional*, defaults to None): The number of top labels that will be returned by the pipeline. If the provided number is `None` or higher than the number of labels available in the model configuration, it will default to the number of labels. function_to_apply(`str`, *optional*, defaults to "softmax"): The function to apply to the model output. By default, the pipeline will apply the softmax function to the output of the model. Valid options: ["softmax", "sigmoid", "none"]. Note that passing Python's built-in `None` will default to "softmax", so you need to pass the string "none" to disable any post-processing. Return: A list of `dict` with the following keys: - **label** (`str`) -- The label predicted. - **score** (`float`) -- The corresponding probability.
__call__
python
huggingface/transformers
src/transformers/pipelines/audio_classification.py
https://github.com/huggingface/transformers/blob/master/src/transformers/pipelines/audio_classification.py
Apache-2.0
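A hedged usage sketch of the pipeline call described above; the checkpoint is an illustrative keyword-spotting model and the file name is a placeholder (ffmpeg must be installed to decode it).

```python
from transformers import pipeline

classifier = pipeline("audio-classification", model="superb/wav2vec2-base-superb-ks")

# A filename is decoded via ffmpeg at the model's sampling rate; a raw np.float32
# array or a {"sampling_rate": ..., "raw": ...} dict is accepted as well.
predictions = classifier("sample.wav", top_k=3)  # "sample.wav" is a placeholder path
print(predictions)  # [{'score': ..., 'label': ...}, ...] sorted by score
```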
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array: """ Helper function to read an audio file through ffmpeg. """ ar = f"{sampling_rate}" ac = "1" format_for_conversion = "f32le" ffmpeg_command = [ "ffmpeg", "-i", "pipe:0", "-ac", ac, "-ar", ar, "-f", format_for_conversion, "-hide_banner", "-loglevel", "quiet", "pipe:1", ] try: with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process: output_stream = ffmpeg_process.communicate(bpayload) except FileNotFoundError as error: raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error out_bytes = output_stream[0] audio = np.frombuffer(out_bytes, np.float32) if audio.shape[0] == 0: raise ValueError( "Soundfile is either not in the correct format or is malformed. Ensure that the soundfile has " "a valid audio file extension (e.g. wav, flac or mp3) and is not corrupted. If reading from a remote " "URL, ensure that the URL is the full address to **download** the audio file." ) return audio
Helper function to read an audio file through ffmpeg.
ffmpeg_read
python
huggingface/transformers
src/transformers/pipelines/audio_utils.py
https://github.com/huggingface/transformers/blob/master/src/transformers/pipelines/audio_utils.py
Apache-2.0
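A short sketch of how this helper is typically fed: raw bytes read from disk (or downloaded) are decoded to a mono float32 waveform at the requested sampling rate. The file name is a placeholder and ffmpeg must be installed.

```python
from transformers.pipelines.audio_utils import ffmpeg_read

sampling_rate = 16_000  # match the model's feature extractor

with open("sample.flac", "rb") as f:  # placeholder path; any format ffmpeg understands works
    payload = f.read()

audio = ffmpeg_read(payload, sampling_rate)
print(audio.dtype, audio.shape)  # float32, (num_samples,)
```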
def ffmpeg_microphone( sampling_rate: int, chunk_length_s: float, format_for_conversion: str = "f32le", ffmpeg_input_device: Optional[str] = None, ffmpeg_additional_args: Optional[list[str]] = None, ): """ Helper function to read audio from a microphone using ffmpeg. The default input device will be used unless another input device is specified using the `ffmpeg_input_device` argument. Uses 'alsa' on Linux, 'avfoundation' on MacOS and 'dshow' on Windows. Arguments: sampling_rate (`int`): The sampling_rate to use when reading the data from the microphone. Try using the model's sampling_rate to avoid resampling later. chunk_length_s (`float` or `int`): The length of the maximum chunk of audio to be sent returned. format_for_conversion (`str`, defaults to `f32le`): The name of the format of the audio samples to be returned by ffmpeg. The standard is `f32le`, `s16le` could also be used. ffmpeg_input_device (`str`, *optional*): The identifier of the input device to be used by ffmpeg (i.e. ffmpeg's '-i' argument). If unset, the default input device will be used. See `https://www.ffmpeg.org/ffmpeg-devices.html#Input-Devices` for how to specify and list input devices. ffmpeg_additional_args (`list[str]`, *optional*): Additional arguments to pass to ffmpeg, can include arguments like -nostdin for running as a background process. For example, to pass -nostdin to the ffmpeg process, pass in ["-nostdin"]. If passing in flags with multiple arguments, use the following convention (eg ["flag", "arg1", "arg2]). Returns: A generator yielding audio chunks of `chunk_length_s` seconds as `bytes` objects of length `int(round(sampling_rate * chunk_length_s)) * size_of_sample`. """ ar = f"{sampling_rate}" ac = "1" if format_for_conversion == "s16le": size_of_sample = 2 elif format_for_conversion == "f32le": size_of_sample = 4 else: raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`") system = platform.system() if system == "Linux": format_ = "alsa" input_ = ffmpeg_input_device or "default" elif system == "Darwin": format_ = "avfoundation" input_ = ffmpeg_input_device or ":default" elif system == "Windows": format_ = "dshow" input_ = ffmpeg_input_device or _get_microphone_name() ffmpeg_additional_args = [] if ffmpeg_additional_args is None else ffmpeg_additional_args ffmpeg_command = [ "ffmpeg", "-f", format_, "-i", input_, "-ac", ac, "-ar", ar, "-f", format_for_conversion, "-fflags", "nobuffer", "-hide_banner", "-loglevel", "quiet", "pipe:1", ] ffmpeg_command.extend(ffmpeg_additional_args) chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample iterator = _ffmpeg_stream(ffmpeg_command, chunk_len) for item in iterator: yield item
Helper function to read audio from a microphone using ffmpeg. The default input device will be used unless another input device is specified using the `ffmpeg_input_device` argument. Uses 'alsa' on Linux, 'avfoundation' on MacOS and 'dshow' on Windows. Arguments: sampling_rate (`int`): The sampling_rate to use when reading the data from the microphone. Try using the model's sampling_rate to avoid resampling later. chunk_length_s (`float` or `int`): The length of the maximum chunk of audio to be sent returned. format_for_conversion (`str`, defaults to `f32le`): The name of the format of the audio samples to be returned by ffmpeg. The standard is `f32le`, `s16le` could also be used. ffmpeg_input_device (`str`, *optional*): The identifier of the input device to be used by ffmpeg (i.e. ffmpeg's '-i' argument). If unset, the default input device will be used. See `https://www.ffmpeg.org/ffmpeg-devices.html#Input-Devices` for how to specify and list input devices. ffmpeg_additional_args (`list[str]`, *optional*): Additional arguments to pass to ffmpeg, can include arguments like -nostdin for running as a background process. For example, to pass -nostdin to the ffmpeg process, pass in ["-nostdin"]. If passing in flags with multiple arguments, use the following convention (eg ["flag", "arg1", "arg2]). Returns: A generator yielding audio chunks of `chunk_length_s` seconds as `bytes` objects of length `int(round(sampling_rate * chunk_length_s)) * size_of_sample`.
ffmpeg_microphone
python
huggingface/transformers
src/transformers/pipelines/audio_utils.py
https://github.com/huggingface/transformers/blob/master/src/transformers/pipelines/audio_utils.py
Apache-2.0
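A hedged sketch of consuming the microphone generator; it needs a working microphone plus ffmpeg built with the platform's capture backend (alsa/avfoundation/dshow), and simply prints the size of each raw chunk.

```python
from transformers.pipelines.audio_utils import ffmpeg_microphone

sampling_rate = 16_000
chunk_length_s = 2.0

# Each item is a bytes object of length int(round(16_000 * 2.0)) * 4 (f32le samples).
for i, chunk in enumerate(ffmpeg_microphone(sampling_rate, chunk_length_s)):
    print(f"chunk {i}: {len(chunk)} bytes")
    if i == 2:  # stop after roughly six seconds of audio
        break
```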
def ffmpeg_microphone_live( sampling_rate: int, chunk_length_s: float, stream_chunk_s: Optional[int] = None, stride_length_s: Optional[Union[Tuple[float, float], float]] = None, format_for_conversion: str = "f32le", ffmpeg_input_device: Optional[str] = None, ffmpeg_additional_args: Optional[list[str]] = None, ): """ Helper function to read audio from a microphone using ffmpeg. This will output `partial` overlapping chunks starting from `stream_chunk_s` (if it is defined) until `chunk_length_s` is reached. It will make use of striding to avoid errors on the "sides" of the various chunks. The default input device will be used unless another input device is specified using the `ffmpeg_input_device` argument. Uses 'alsa' on Linux, 'avfoundation' on MacOS and 'dshow' on Windows. Arguments: sampling_rate (`int`): The sampling_rate to use when reading the data from the microphone. Try using the model's sampling_rate to avoid resampling later. chunk_length_s (`float` or `int`): The length of the maximum chunk of audio to be sent returned. This includes the eventual striding. stream_chunk_s (`float` or `int`): The length of the minimal temporary audio to be returned. stride_length_s (`float` or `int` or `(float, float)`, *optional*): The length of the striding to be used. Stride is used to provide context to a model on the (left, right) of an audio sample but without using that part to actually make the prediction. Setting this does not change the length of the chunk. format_for_conversion (`str`, *optional*, defaults to `f32le`): The name of the format of the audio samples to be returned by ffmpeg. The standard is `f32le`, `s16le` could also be used. ffmpeg_input_device (`str`, *optional*): The identifier of the input device to be used by ffmpeg (i.e. ffmpeg's '-i' argument). If unset, the default input device will be used. See `https://www.ffmpeg.org/ffmpeg-devices.html#Input-Devices` for how to specify and list input devices. ffmpeg_additional_args (`list[str]`, *optional*): Additional arguments to pass to ffmpeg, can include arguments like -nostdin for running as a background process. For example, to pass -nostdin to the ffmpeg process, pass in ["-nostdin"]. If passing in flags with multiple arguments, use the following convention (eg ["flag", "arg1", "arg2]). Return: A generator yielding dictionaries of the following form `{"sampling_rate": int, "raw": np.array(), "partial" bool}` With optionally a `"stride" (int, int)` key if `stride_length_s` is defined. `stride` and `raw` are all expressed in `samples`, and `partial` is a boolean saying if the current yield item is a whole chunk, or a partial temporary result to be later replaced by another larger chunk. """ if stream_chunk_s is not None: chunk_s = stream_chunk_s else: chunk_s = chunk_length_s microphone = ffmpeg_microphone( sampling_rate, chunk_s, format_for_conversion=format_for_conversion, ffmpeg_input_device=ffmpeg_input_device, ffmpeg_additional_args=[] if ffmpeg_additional_args is None else ffmpeg_additional_args, ) if format_for_conversion == "s16le": dtype = np.int16 size_of_sample = 2 elif format_for_conversion == "f32le": dtype = np.float32 size_of_sample = 4 else: raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`") if stride_length_s is None: stride_length_s = chunk_length_s / 6 chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample if isinstance(stride_length_s, (int, float)): stride_length_s = [stride_length_s, stride_length_s] stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample audio_time = datetime.datetime.now() delta = datetime.timedelta(seconds=chunk_s) for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True): # Put everything back in numpy scale item["raw"] = np.frombuffer(item["raw"], dtype=dtype) item["stride"] = ( item["stride"][0] // size_of_sample, item["stride"][1] // size_of_sample, ) item["sampling_rate"] = sampling_rate audio_time += delta if datetime.datetime.now() > audio_time + 10 * delta: # We're late !! SKIP continue yield item
Helper function to read audio from a microphone using ffmpeg. This will output `partial` overlapping chunks starting from `stream_chunk_s` (if it is defined) until `chunk_length_s` is reached. It will make use of striding to avoid errors on the "sides" of the various chunks. The default input device will be used unless another input device is specified using the `ffmpeg_input_device` argument. Uses 'alsa' on Linux, 'avfoundation' on MacOS and 'dshow' on Windows. Arguments: sampling_rate (`int`): The sampling_rate to use when reading the data from the microphone. Try using the model's sampling_rate to avoid resampling later. chunk_length_s (`float` or `int`): The length of the maximum chunk of audio to be sent returned. This includes the eventual striding. stream_chunk_s (`float` or `int`): The length of the minimal temporary audio to be returned. stride_length_s (`float` or `int` or `(float, float)`, *optional*): The length of the striding to be used. Stride is used to provide context to a model on the (left, right) of an audio sample but without using that part to actually make the prediction. Setting this does not change the length of the chunk. format_for_conversion (`str`, *optional*, defaults to `f32le`): The name of the format of the audio samples to be returned by ffmpeg. The standard is `f32le`, `s16le` could also be used. ffmpeg_input_device (`str`, *optional*): The identifier of the input device to be used by ffmpeg (i.e. ffmpeg's '-i' argument). If unset, the default input device will be used. See `https://www.ffmpeg.org/ffmpeg-devices.html#Input-Devices` for how to specify and list input devices. ffmpeg_additional_args (`list[str]`, *optional*): Additional arguments to pass to ffmpeg, can include arguments like -nostdin for running as a background process. For example, to pass -nostdin to the ffmpeg process, pass in ["-nostdin"]. If passing in flags with multiple arguments, use the following convention (eg ["flag", "arg1", "arg2]). Return: A generator yielding dictionaries of the following form `{"sampling_rate": int, "raw": np.array(), "partial" bool}` With optionally a `"stride" (int, int)` key if `stride_length_s` is defined. `stride` and `raw` are all expressed in `samples`, and `partial` is a boolean saying if the current yield item is a whole chunk, or a partial temporary result to be later replaced by another larger chunk.
ffmpeg_microphone_live
python
huggingface/transformers
src/transformers/pipelines/audio_utils.py
https://github.com/huggingface/transformers/blob/master/src/transformers/pipelines/audio_utils.py
Apache-2.0
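A minimal usage sketch for `ffmpeg_microphone_live`, assuming `ffmpeg` is on the PATH, a working microphone, and an illustrative Whisper checkpoint; the streaming generator is passed straight to an ASR pipeline and the loop is stopped with Ctrl+C.

```python
import sys

from transformers import pipeline
from transformers.pipelines.audio_utils import ffmpeg_microphone_live

# Illustrative checkpoint; any pipeline-supported ASR model could be substituted.
transcriber = pipeline("automatic-speech-recognition", model="openai/whisper-tiny.en")
sampling_rate = transcriber.feature_extractor.sampling_rate

# Emit a partial (temporary) result every second, a full chunk every five seconds.
microphone = ffmpeg_microphone_live(
    sampling_rate=sampling_rate,
    chunk_length_s=5.0,
    stream_chunk_s=1.0,
)

print("Start speaking... (Ctrl+C to stop)")
for prediction in transcriber(microphone):
    sys.stdout.write("\033[K")  # clear the current line before overwriting it
    print(prediction["text"], end="\r")
```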
def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False): """ Reads raw bytes from an iterator and does chunks of length `chunk_len`. Optionally adds `stride` to each chunks to get overlaps. `stream` is used to return partial results even if a full `chunk_len` is not yet available. """ acc = b"" stride_left, stride_right = stride if stride_left + stride_right >= chunk_len: raise ValueError( f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}" ) _stride_left = 0 for raw in iterator: acc += raw if stream and len(acc) < chunk_len: stride = (_stride_left, 0) yield {"raw": acc[:chunk_len], "stride": stride, "partial": True} else: while len(acc) >= chunk_len: # We are flushing the accumulator stride = (_stride_left, stride_right) item = {"raw": acc[:chunk_len], "stride": stride} if stream: item["partial"] = False yield item _stride_left = stride_left acc = acc[chunk_len - stride_left - stride_right :] # Last chunk if len(acc) > stride_left: item = {"raw": acc, "stride": (_stride_left, 0)} if stream: item["partial"] = False yield item
Reads raw bytes from an iterator and yields chunks of length `chunk_len`. Optionally adds `stride` to each chunk to get overlaps. `stream` is used to return partial results even if a full `chunk_len` is not yet available.
chunk_bytes_iter
python
huggingface/transformers
src/transformers/pipelines/audio_utils.py
https://github.com/huggingface/transformers/blob/master/src/transformers/pipelines/audio_utils.py
Apache-2.0
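Because `chunk_bytes_iter` is framework-agnostic, its chunking and striding behaviour can be inspected with plain synthetic bytes; the packet and chunk sizes below are arbitrary.

```python
from transformers.pipelines.audio_utils import chunk_bytes_iter

# Ten 100-byte "packets" standing in for raw audio read from ffmpeg.
packets = (bytes([i]) * 100 for i in range(10))

# 300-byte chunks with a (50, 50)-byte stride: consecutive chunks overlap by 100 bytes.
for chunk in chunk_bytes_iter(packets, chunk_len=300, stride=(50, 50), stream=False):
    print(len(chunk["raw"]), chunk["stride"])
```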
def _ffmpeg_stream(ffmpeg_command, buflen: int): """ Internal function to create the generator of data through ffmpeg """ bufsize = 2**24 # 16Mo try: with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process: while True: raw = ffmpeg_process.stdout.read(buflen) if raw == b"": break yield raw except FileNotFoundError as error: raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
Internal function to create the generator of data through ffmpeg
_ffmpeg_stream
python
huggingface/transformers
src/transformers/pipelines/audio_utils.py
https://github.com/huggingface/transformers/blob/master/src/transformers/pipelines/audio_utils.py
Apache-2.0
def _get_microphone_name(): """ Retrieve the microphone name in Windows . """ command = ["ffmpeg", "-list_devices", "true", "-f", "dshow", "-i", ""] try: ffmpeg_devices = subprocess.run(command, text=True, stderr=subprocess.PIPE, encoding="utf-8") microphone_lines = [line for line in ffmpeg_devices.stderr.splitlines() if "(audio)" in line] if microphone_lines: microphone_name = microphone_lines[0].split('"')[1] print(f"Using microphone: {microphone_name}") return f"audio={microphone_name}" except FileNotFoundError: print("ffmpeg was not found. Please install it or make sure it is in your system PATH.") return "default"
Retrieve the microphone name on Windows.
_get_microphone_name
python
huggingface/transformers
src/transformers/pipelines/audio_utils.py
https://github.com/huggingface/transformers/blob/master/src/transformers/pipelines/audio_utils.py
Apache-2.0
def rescale_stride(stride, ratio): """ Rescales the stride values from audio space to tokens/logits space. (160_000, 16_000, 16_000) -> (2000, 200, 200) for instance. """ # Shape is [B, SEQ] for tokens # [B, SEQ, V] for logits new_strides = [] for input_n, left, right in stride: token_n = int(round(input_n * ratio)) left = int(round(left / input_n * token_n)) right = int(round(right / input_n * token_n)) new_stride = (token_n, left, right) new_strides.append(new_stride) return new_strides
Rescales the stride values from audio space to tokens/logits space. (160_000, 16_000, 16_000) -> (2000, 200, 200) for instance.
rescale_stride
python
huggingface/transformers
src/transformers/pipelines/automatic_speech_recognition.py
https://github.com/huggingface/transformers/blob/master/src/transformers/pipelines/automatic_speech_recognition.py
Apache-2.0
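A worked instance of the rescaling in the docstring: 160,000 input samples mapped onto 2,000 logit frames gives a ratio of 2000/160000 = 1/80, so 16,000-sample strides become 200 frames.

```python
from transformers.pipelines.automatic_speech_recognition import rescale_stride

strides = [(160_000, 16_000, 16_000)]  # (chunk length, left stride, right stride) in samples
ratio = 2000 / 160_000                 # logit frames per input sample

print(rescale_stride(strides, ratio))  # [(2000, 200, 200)]
```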
def __call__( self, inputs: Union[np.ndarray, bytes, str], **kwargs, ): """ Transcribe the audio sequence(s) given as inputs to text. See the [`AutomaticSpeechRecognitionPipeline`] documentation for more information. Args: inputs (`np.ndarray` or `bytes` or `str` or `dict`): The inputs is either : - `str` that is either the filename of a local audio file, or a public URL address to download the audio file. The file will be read at the correct sampling rate to get the waveform using *ffmpeg*. This requires *ffmpeg* to be installed on the system. - `bytes` it is supposed to be the content of an audio file and is interpreted by *ffmpeg* in the same way. - (`np.ndarray` of shape (n, ) of type `np.float32` or `np.float64`) Raw audio at the correct sampling rate (no further check will be done) - `dict` form can be used to pass raw audio sampled at arbitrary `sampling_rate` and let this pipeline do the resampling. The dict must be in the format `{"sampling_rate": int, "raw": np.array}` with optionally a `"stride": (left: int, right: int)` than can ask the pipeline to treat the first `left` samples and last `right` samples to be ignored in decoding (but used at inference to provide more context to the model). Only use `stride` with CTC models. return_timestamps (*optional*, `str` or `bool`): Only available for pure CTC models (Wav2Vec2, HuBERT, etc) and the Whisper model. Not available for other sequence-to-sequence models. For CTC models, timestamps can take one of two formats: - `"char"`: the pipeline will return timestamps along the text for every character in the text. For instance, if you get `[{"text": "h", "timestamp": (0.5, 0.6)}, {"text": "i", "timestamp": (0.7, 0.9)}]`, then it means the model predicts that the letter "h" was spoken after `0.5` and before `0.6` seconds. - `"word"`: the pipeline will return timestamps along the text for every word in the text. For instance, if you get `[{"text": "hi ", "timestamp": (0.5, 0.9)}, {"text": "there", "timestamp": (1.0, 1.5)}]`, then it means the model predicts that the word "hi" was spoken after `0.5` and before `0.9` seconds. For the Whisper model, timestamps can take one of two formats: - `"word"`: same as above for word-level CTC timestamps. Word-level timestamps are predicted through the *dynamic-time warping (DTW)* algorithm, an approximation to word-level timestamps by inspecting the cross-attention weights. - `True`: the pipeline will return timestamps along the text for *segments* of words in the text. For instance, if you get `[{"text": " Hi there!", "timestamp": (0.5, 1.5)}]`, then it means the model predicts that the segment "Hi there!" was spoken after `0.5` and before `1.5` seconds. Note that a segment of text refers to a sequence of one or more words, rather than individual words as with word-level timestamps. generate_kwargs (`dict`, *optional*): The dictionary of ad-hoc parametrization of `generate_config` to be used for the generation call. For a complete overview of generate, check the [following guide](https://huggingface.co/docs/transformers/en/main_classes/text_generation). Return: `Dict`: A dictionary with the following keys: - **text** (`str`): The recognized text. - **chunks** (*optional(, `List[Dict]`) When using `return_timestamps`, the `chunks` will become a list containing all the various text chunks identified by the model, *e.g.* `[{"text": "hi ", "timestamp": (0.5, 0.9)}, {"text": "there", "timestamp": (1.0, 1.5)}]`. The original full text can roughly be recovered by doing `"".join(chunk["text"] for chunk in output["chunks"])`. """ return super().__call__(inputs, **kwargs)
Transcribe the audio sequence(s) given as inputs to text. See the [`AutomaticSpeechRecognitionPipeline`] documentation for more information. Args: inputs (`np.ndarray` or `bytes` or `str` or `dict`): The inputs is either : - `str` that is either the filename of a local audio file, or a public URL address to download the audio file. The file will be read at the correct sampling rate to get the waveform using *ffmpeg*. This requires *ffmpeg* to be installed on the system. - `bytes` it is supposed to be the content of an audio file and is interpreted by *ffmpeg* in the same way. - (`np.ndarray` of shape (n, ) of type `np.float32` or `np.float64`) Raw audio at the correct sampling rate (no further check will be done) - `dict` form can be used to pass raw audio sampled at arbitrary `sampling_rate` and let this pipeline do the resampling. The dict must be in the format `{"sampling_rate": int, "raw": np.array}` with optionally a `"stride": (left: int, right: int)` than can ask the pipeline to treat the first `left` samples and last `right` samples to be ignored in decoding (but used at inference to provide more context to the model). Only use `stride` with CTC models. return_timestamps (*optional*, `str` or `bool`): Only available for pure CTC models (Wav2Vec2, HuBERT, etc) and the Whisper model. Not available for other sequence-to-sequence models. For CTC models, timestamps can take one of two formats: - `"char"`: the pipeline will return timestamps along the text for every character in the text. For instance, if you get `[{"text": "h", "timestamp": (0.5, 0.6)}, {"text": "i", "timestamp": (0.7, 0.9)}]`, then it means the model predicts that the letter "h" was spoken after `0.5` and before `0.6` seconds. - `"word"`: the pipeline will return timestamps along the text for every word in the text. For instance, if you get `[{"text": "hi ", "timestamp": (0.5, 0.9)}, {"text": "there", "timestamp": (1.0, 1.5)}]`, then it means the model predicts that the word "hi" was spoken after `0.5` and before `0.9` seconds. For the Whisper model, timestamps can take one of two formats: - `"word"`: same as above for word-level CTC timestamps. Word-level timestamps are predicted through the *dynamic-time warping (DTW)* algorithm, an approximation to word-level timestamps by inspecting the cross-attention weights. - `True`: the pipeline will return timestamps along the text for *segments* of words in the text. For instance, if you get `[{"text": " Hi there!", "timestamp": (0.5, 1.5)}]`, then it means the model predicts that the segment "Hi there!" was spoken after `0.5` and before `1.5` seconds. Note that a segment of text refers to a sequence of one or more words, rather than individual words as with word-level timestamps. generate_kwargs (`dict`, *optional*): The dictionary of ad-hoc parametrization of `generate_config` to be used for the generation call. For a complete overview of generate, check the [following guide](https://huggingface.co/docs/transformers/en/main_classes/text_generation). Return: `Dict`: A dictionary with the following keys: - **text** (`str`): The recognized text. - **chunks** (*optional(, `List[Dict]`) When using `return_timestamps`, the `chunks` will become a list containing all the various text chunks identified by the model, *e.g.* `[{"text": "hi ", "timestamp": (0.5, 0.9)}, {"text": "there", "timestamp": (1.0, 1.5)}]`. The original full text can roughly be recovered by doing `"".join(chunk["text"] for chunk in output["chunks"])`.
__call__
python
huggingface/transformers
src/transformers/pipelines/automatic_speech_recognition.py
https://github.com/huggingface/transformers/blob/master/src/transformers/pipelines/automatic_speech_recognition.py
Apache-2.0
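A short sketch of calling the ASR pipeline on a file; the checkpoint is illustrative, `sample.flac` is a placeholder path, and long-form chunking plus word timestamps are optional extras.

```python
from transformers import pipeline

# Illustrative checkpoint; `chunk_length_s` and `return_timestamps` are optional.
asr = pipeline("automatic-speech-recognition", model="openai/whisper-tiny.en")

# A local path, URL or raw bytes are decoded with ffmpeg at the model's sampling rate.
result = asr("sample.flac", chunk_length_s=30, return_timestamps="word")
print(result["text"])
print(result["chunks"][:3])  # first few word-level timestamps
```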
def infer_framework_load_model( model, config: AutoConfig, model_classes: Optional[Dict[str, Tuple[type]]] = None, task: Optional[str] = None, framework: Optional[str] = None, **model_kwargs, ): """ Select framework (TensorFlow or PyTorch) to use from the `model` passed. Returns a tuple (framework, model). If `model` is instantiated, this function will just infer the framework from the model class. Otherwise `model` is actually a checkpoint name and this method will try to instantiate it using `model_classes`. Since we don't want to instantiate the model twice, this model is returned for use by the pipeline. If both frameworks are installed and available for `model`, PyTorch is selected. Args: model (`str`, [`PreTrainedModel`] or [`TFPreTrainedModel]`): The model to infer the framework from. If `str`, a checkpoint name. The model to infer the framewrok from. config ([`AutoConfig`]): The config associated with the model to help using the correct class model_classes (dictionary `str` to `type`, *optional*): A mapping framework to class. task (`str`): The task defining which pipeline will be returned. model_kwargs: Additional dictionary of keyword arguments passed along to the model's `from_pretrained(..., **model_kwargs)` function. Returns: `Tuple`: A tuple framework, model. """ if not is_tf_available() and not is_torch_available(): raise RuntimeError( "At least one of TensorFlow 2.0 or PyTorch should be installed. " "To install TensorFlow 2.0, read the instructions at https://www.tensorflow.org/install/ " "To install PyTorch, read the instructions at https://pytorch.org/." ) if isinstance(model, str): model_kwargs["_from_pipeline"] = task class_tuple = () look_pt = is_torch_available() and framework in {"pt", None} look_tf = is_tf_available() and framework in {"tf", None} if model_classes: if look_pt: class_tuple = class_tuple + model_classes.get("pt", (AutoModel,)) if look_tf: class_tuple = class_tuple + model_classes.get("tf", (TFAutoModel,)) if config.architectures: classes = [] for architecture in config.architectures: transformers_module = importlib.import_module("transformers") if look_pt: _class = getattr(transformers_module, architecture, None) if _class is not None: classes.append(_class) if look_tf: _class = getattr(transformers_module, f"TF{architecture}", None) if _class is not None: classes.append(_class) class_tuple = class_tuple + tuple(classes) if len(class_tuple) == 0: raise ValueError(f"Pipeline cannot infer suitable model classes from {model}") all_traceback = {} for model_class in class_tuple: kwargs = model_kwargs.copy() if framework == "pt" and model.endswith(".h5"): kwargs["from_tf"] = True logger.warning( "Model might be a TensorFlow model (ending with `.h5`) but TensorFlow is not available. " "Trying to load the model with PyTorch." ) elif framework == "tf" and model.endswith(".bin"): kwargs["from_pt"] = True logger.warning( "Model might be a PyTorch model (ending with `.bin`) but PyTorch is not available. " "Trying to load the model with Tensorflow." ) try: model = model_class.from_pretrained(model, **kwargs) if hasattr(model, "eval"): model = model.eval() # Stop loading on the first successful load. break except (OSError, ValueError): all_traceback[model_class.__name__] = traceback.format_exc() continue if isinstance(model, str): error = "" for class_name, trace in all_traceback.items(): error += f"while loading with {class_name}, an error is thrown:\n{trace}\n" raise ValueError( f"Could not load model {model} with any of the following classes: {class_tuple}. See the original errors:\n\n{error}\n" ) if framework is None: framework = infer_framework(model.__class__) return framework, model
Select framework (TensorFlow or PyTorch) to use from the `model` passed. Returns a tuple (framework, model). If `model` is instantiated, this function will just infer the framework from the model class. Otherwise `model` is actually a checkpoint name and this method will try to instantiate it using `model_classes`. Since we don't want to instantiate the model twice, this model is returned for use by the pipeline. If both frameworks are installed and available for `model`, PyTorch is selected. Args: model (`str`, [`PreTrainedModel`] or [`TFPreTrainedModel]`): The model to infer the framework from. If `str`, a checkpoint name. The model to infer the framewrok from. config ([`AutoConfig`]): The config associated with the model to help using the correct class model_classes (dictionary `str` to `type`, *optional*): A mapping framework to class. task (`str`): The task defining which pipeline will be returned. model_kwargs: Additional dictionary of keyword arguments passed along to the model's `from_pretrained(..., **model_kwargs)` function. Returns: `Tuple`: A tuple framework, model.
infer_framework_load_model
python
huggingface/transformers
src/transformers/pipelines/base.py
https://github.com/huggingface/transformers/blob/master/src/transformers/pipelines/base.py
Apache-2.0
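A hedged sketch of calling `infer_framework_load_model` directly; in normal use `pipeline(...)` calls it for you. The checkpoint name is illustrative.

```python
from transformers import AutoConfig
from transformers.pipelines.base import infer_framework_load_model

checkpoint = "distilbert-base-uncased-finetuned-sst-2-english"  # illustrative checkpoint
config = AutoConfig.from_pretrained(checkpoint)

# With no `model_classes`, candidate classes are resolved from `config.architectures`.
framework, model = infer_framework_load_model(checkpoint, config, task="text-classification")
print(framework, model.__class__.__name__)  # e.g. "pt" DistilBertForSequenceClassification
```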
def infer_framework_from_model( model, model_classes: Optional[Dict[str, Tuple[type]]] = None, task: Optional[str] = None, framework: Optional[str] = None, **model_kwargs, ): """ Select framework (TensorFlow or PyTorch) to use from the `model` passed. Returns a tuple (framework, model). If `model` is instantiated, this function will just infer the framework from the model class. Otherwise `model` is actually a checkpoint name and this method will try to instantiate it using `model_classes`. Since we don't want to instantiate the model twice, this model is returned for use by the pipeline. If both frameworks are installed and available for `model`, PyTorch is selected. Args: model (`str`, [`PreTrainedModel`] or [`TFPreTrainedModel]`): The model to infer the framework from. If `str`, a checkpoint name. The model to infer the framewrok from. model_classes (dictionary `str` to `type`, *optional*): A mapping framework to class. task (`str`): The task defining which pipeline will be returned. model_kwargs: Additional dictionary of keyword arguments passed along to the model's `from_pretrained(..., **model_kwargs)` function. Returns: `Tuple`: A tuple framework, model. """ if isinstance(model, str): config = AutoConfig.from_pretrained(model, _from_pipeline=task, **model_kwargs) else: config = model.config return infer_framework_load_model( model, config, model_classes=model_classes, _from_pipeline=task, task=task, framework=framework, **model_kwargs )
Select framework (TensorFlow or PyTorch) to use from the `model` passed. Returns a tuple (framework, model). If `model` is instantiated, this function will just infer the framework from the model class. Otherwise `model` is actually a checkpoint name and this method will try to instantiate it using `model_classes`. Since we don't want to instantiate the model twice, this model is returned for use by the pipeline. If both frameworks are installed and available for `model`, PyTorch is selected. Args: model (`str`, [`PreTrainedModel`] or [`TFPreTrainedModel]`): The model to infer the framework from. If `str`, a checkpoint name. The model to infer the framewrok from. model_classes (dictionary `str` to `type`, *optional*): A mapping framework to class. task (`str`): The task defining which pipeline will be returned. model_kwargs: Additional dictionary of keyword arguments passed along to the model's `from_pretrained(..., **model_kwargs)` function. Returns: `Tuple`: A tuple framework, model.
infer_framework_from_model
python
huggingface/transformers
src/transformers/pipelines/base.py
https://github.com/huggingface/transformers/blob/master/src/transformers/pipelines/base.py
Apache-2.0
def get_framework(model, revision: Optional[str] = None): """ Select framework (TensorFlow or PyTorch) to use. Args: model (`str`, [`PreTrainedModel`] or [`TFPreTrainedModel]`): If both frameworks are installed, picks the one corresponding to the model passed (either a model class or the model name). If no specific model is provided, defaults to using PyTorch. """ warnings.warn( "`get_framework` is deprecated and will be removed in v5, use `infer_framework_from_model` instead.", FutureWarning, ) if not is_tf_available() and not is_torch_available(): raise RuntimeError( "At least one of TensorFlow 2.0 or PyTorch should be installed. " "To install TensorFlow 2.0, read the instructions at https://www.tensorflow.org/install/ " "To install PyTorch, read the instructions at https://pytorch.org/." ) if isinstance(model, str): if is_torch_available() and not is_tf_available(): model = AutoModel.from_pretrained(model, revision=revision) elif is_tf_available() and not is_torch_available(): model = TFAutoModel.from_pretrained(model, revision=revision) else: try: model = AutoModel.from_pretrained(model, revision=revision) except OSError: model = TFAutoModel.from_pretrained(model, revision=revision) framework = infer_framework(model.__class__) return framework
Select framework (TensorFlow or PyTorch) to use. Args: model (`str`, [`PreTrainedModel`] or [`TFPreTrainedModel]`): If both frameworks are installed, picks the one corresponding to the model passed (either a model class or the model name). If no specific model is provided, defaults to using PyTorch.
get_framework
python
huggingface/transformers
src/transformers/pipelines/base.py
https://github.com/huggingface/transformers/blob/master/src/transformers/pipelines/base.py
Apache-2.0
def get_default_model_and_revision( targeted_task: Dict, framework: Optional[str], task_options: Optional[Any] ) -> Tuple[str, str]: """ Select a default model to use for a given task. Defaults to pytorch if ambiguous. Args: targeted_task (`Dict`): Dictionary representing the given task, that should contain default models framework (`str`, None) "pt", "tf" or None, representing a specific framework if it was specified, or None if we don't know yet. task_options (`Any`, None) Any further value required by the task to get fully specified, for instance (SRC, TGT) languages for translation task. Returns Tuple: - `str` The model string representing the default model for this pipeline. - `str` The revision of the model. """ if is_torch_available() and not is_tf_available(): framework = "pt" elif is_tf_available() and not is_torch_available(): framework = "tf" defaults = targeted_task["default"] if task_options: if task_options not in defaults: raise ValueError(f"The task does not provide any default models for options {task_options}") default_models = defaults[task_options]["model"] elif "model" in defaults: default_models = targeted_task["default"]["model"] else: # XXX This error message needs to be updated to be more generic if more tasks are going to become # parametrized raise ValueError('The task defaults can\'t be correctly selected. You probably meant "translation_XX_to_YY"') if framework is None: framework = "pt" return default_models[framework]
Select a default model to use for a given task. Defaults to pytorch if ambiguous. Args: targeted_task (`Dict`): Dictionary representing the given task, that should contain default models framework (`str`, None) "pt", "tf" or None, representing a specific framework if it was specified, or None if we don't know yet. task_options (`Any`, None) Any further value required by the task to get fully specified, for instance (SRC, TGT) languages for translation task. Returns Tuple: - `str` The model string representing the default model for this pipeline. - `str` The revision of the model.
get_default_model_and_revision
python
huggingface/transformers
src/transformers/pipelines/base.py
https://github.com/huggingface/transformers/blob/master/src/transformers/pipelines/base.py
Apache-2.0
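A sketch of how `get_default_model_and_revision` reads a task registry entry; the dictionary below only mimics the structure of the real registry, and the checkpoint/revision values are placeholders.

```python
from transformers.pipelines.base import get_default_model_and_revision

# Hypothetical task entry shaped like the pipeline registry: per-framework (model, revision) pairs.
targeted_task = {
    "default": {
        "model": {
            "pt": ("distilbert-base-uncased-finetuned-sst-2-english", "main"),
            "tf": ("distilbert-base-uncased-finetuned-sst-2-english", "main"),
        }
    }
}

model, revision = get_default_model_and_revision(targeted_task, framework=None, task_options=None)
print(model, revision)  # falls back to the PyTorch entry when the framework is ambiguous
```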
def load_assistant_model( model: "PreTrainedModel", assistant_model: Optional[Union[str, "PreTrainedModel"]], assistant_tokenizer: Optional[PreTrainedTokenizer], ) -> Tuple[Optional["PreTrainedModel"], Optional[PreTrainedTokenizer]]: """ Prepares the assistant model and the assistant tokenizer for a pipeline whose model that can call `generate`. Args: model ([`PreTrainedModel`]): The main model that will be used by the pipeline to make predictions. assistant_model (`str` or [`PreTrainedModel`], *optional*): The assistant model that will be used by the pipeline to make predictions. assistant_tokenizer ([`PreTrainedTokenizer`], *optional*): The assistant tokenizer that will be used by the pipeline to encode data for the model. Returns: Tuple: The loaded assistant model and (optionally) the loaded tokenizer. """ if not model.can_generate() or assistant_model is None: return None, None if getattr(model, "framework") != "pt" or not isinstance(model, PreTrainedModel): raise ValueError( "Assisted generation, triggered by the `assistant_model` argument, is only available for " "`PreTrainedModel` model instances. For instance, TF or JAX models are not supported." ) # If the model is passed as a string, load the model and the corresponding tokenizer if isinstance(assistant_model, str): assistant_config = AutoConfig.from_pretrained(assistant_model) _, loaded_assistant_model = infer_framework_load_model(assistant_model, config=assistant_config) loaded_assistant_model = loaded_assistant_model.to(device=model.device, dtype=model.dtype) loaded_assistant_tokenizer = AutoTokenizer.from_pretrained(assistant_model) else: loaded_assistant_model = assistant_model loaded_assistant_tokenizer = assistant_tokenizer # Finally, let's check the tokenizers: if the two models have different tokenizers, we need to keep the assistant # tokenizer same_vocab_size = model.config.vocab_size == loaded_assistant_model.config.vocab_size same_special_tokens = all( getattr(model.config, token) == getattr(loaded_assistant_model.config, token) for token in ("eos_token_id", "pad_token_id", "bos_token_id") ) if same_vocab_size and same_special_tokens: loaded_assistant_tokenizer = None elif loaded_assistant_tokenizer is None: raise ValueError( "The assistant model has a different tokenizer than the main model. You should pass the assistant " "tokenizer." ) return loaded_assistant_model, loaded_assistant_tokenizer
Prepares the assistant model and the assistant tokenizer for a pipeline whose model can call `generate`. Args: model ([`PreTrainedModel`]): The main model that will be used by the pipeline to make predictions. assistant_model (`str` or [`PreTrainedModel`], *optional*): The assistant model that will be used by the pipeline to make predictions. assistant_tokenizer ([`PreTrainedTokenizer`], *optional*): The assistant tokenizer that will be used by the pipeline to encode data for the model. Returns: Tuple: The loaded assistant model and (optionally) the loaded tokenizer.
load_assistant_model
python
huggingface/transformers
src/transformers/pipelines/base.py
https://github.com/huggingface/transformers/blob/master/src/transformers/pipelines/base.py
Apache-2.0
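`load_assistant_model` is invoked internally when a pipeline is built with an assistant model for assisted generation; a hedged user-facing sketch, with illustrative checkpoints from the same family so the tokenizers and special tokens match:

```python
from transformers import pipeline

# Illustrative main/draft pair; sharing a tokenizer means no assistant tokenizer is required.
generator = pipeline(
    "text-generation",
    model="facebook/opt-1.3b",
    assistant_model="facebook/opt-125m",
)
print(generator("The quick brown fox", max_new_tokens=20)[0]["generated_text"])
```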
def save_binary(self, data: Union[dict, List[dict]]) -> str: """ Save the provided data object as a pickle-formatted binary data on the disk. Args: data (`dict` or list of `dict`): The data to store. Returns: `str`: Path where the data has been saved. """ path, _ = os.path.splitext(self.output_path) binary_path = os.path.extsep.join((path, "pickle")) with open(binary_path, "wb+") as f_output: pickle.dump(data, f_output) return binary_path
Save the provided data object as pickle-formatted binary data on disk. Args: data (`dict` or list of `dict`): The data to store. Returns: `str`: Path where the data has been saved.
save_binary
python
huggingface/transformers
src/transformers/pipelines/base.py
https://github.com/huggingface/transformers/blob/master/src/transformers/pipelines/base.py
Apache-2.0
def from_str( format: str, output_path: Optional[str], input_path: Optional[str], column: Optional[str], overwrite=False, ) -> "PipelineDataFormat": """ Creates an instance of the right subclass of [`~pipelines.PipelineDataFormat`] depending on `format`. Args: format (`str`): The format of the desired pipeline. Acceptable values are `"json"`, `"csv"` or `"pipe"`. output_path (`str`, *optional*): Where to save the outgoing data. input_path (`str`, *optional*): Where to look for the input data. column (`str`, *optional*): The column to read. overwrite (`bool`, *optional*, defaults to `False`): Whether or not to overwrite the `output_path`. Returns: [`~pipelines.PipelineDataFormat`]: The proper data format. """ if format == "json": return JsonPipelineDataFormat(output_path, input_path, column, overwrite=overwrite) elif format == "csv": return CsvPipelineDataFormat(output_path, input_path, column, overwrite=overwrite) elif format == "pipe": return PipedPipelineDataFormat(output_path, input_path, column, overwrite=overwrite) else: raise KeyError(f"Unknown reader {format} (Available reader are json/csv/pipe)")
Creates an instance of the right subclass of [`~pipelines.PipelineDataFormat`] depending on `format`. Args: format (`str`): The format of the desired pipeline. Acceptable values are `"json"`, `"csv"` or `"pipe"`. output_path (`str`, *optional*): Where to save the outgoing data. input_path (`str`, *optional*): Where to look for the input data. column (`str`, *optional*): The column to read. overwrite (`bool`, *optional*, defaults to `False`): Whether or not to overwrite the `output_path`. Returns: [`~pipelines.PipelineDataFormat`]: The proper data format.
from_str
python
huggingface/transformers
src/transformers/pipelines/base.py
https://github.com/huggingface/transformers/blob/master/src/transformers/pipelines/base.py
Apache-2.0
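A small end-to-end sketch of `PipelineDataFormat.from_str` with hypothetical file names; a CSV with a `text` column is written first so the format object has something to iterate over.

```python
import csv

from transformers.pipelines.base import PipelineDataFormat

# Hypothetical input file with the column we want to read.
with open("inputs.csv", "w", newline="") as f:
    writer = csv.DictWriter(f, fieldnames=["text"])
    writer.writeheader()
    writer.writerow({"text": "I love this movie."})

data_format = PipelineDataFormat.from_str(
    "csv", output_path="outputs.csv", input_path="inputs.csv", column="text", overwrite=True
)
for row in data_format:
    print(row)  # yields the value of the requested column for each CSV row
```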
def save(self, data: List[dict]): """ Save the provided data object with the representation for the current [`~pipelines.PipelineDataFormat`]. Args: data (`List[dict]`): The data to store. """ with open(self.output_path, "w") as f: if len(data) > 0: writer = csv.DictWriter(f, list(data[0].keys())) writer.writeheader() writer.writerows(data)
Save the provided data object with the representation for the current [`~pipelines.PipelineDataFormat`]. Args: data (`List[dict]`): The data to store.
save
python
huggingface/transformers
src/transformers/pipelines/base.py
https://github.com/huggingface/transformers/blob/master/src/transformers/pipelines/base.py
Apache-2.0
def save_pretrained( self, save_directory: Union[str, os.PathLike], safe_serialization: bool = True, **kwargs, ): """ Save the pipeline's model and tokenizer. Args: save_directory (`str` or `os.PathLike`): A path to the directory where to saved. It will be created if it doesn't exist. safe_serialization (`str`): Whether to save the model using `safetensors` or the traditional way for PyTorch or Tensorflow. kwargs (`Dict[str, Any]`, *optional*): Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method. """ use_auth_token = kwargs.pop("use_auth_token", None) if use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", FutureWarning, ) if kwargs.get("token", None) is not None: raise ValueError( "`token` and `use_auth_token` are both specified. Please set only the argument `token`." ) kwargs["token"] = use_auth_token if os.path.isfile(save_directory): logger.error(f"Provided path ({save_directory}) should be a directory, not a file") return os.makedirs(save_directory, exist_ok=True) if hasattr(self, "_registered_impl"): # Add info to the config pipeline_info = self._registered_impl.copy() custom_pipelines = {} for task, info in pipeline_info.items(): if info["impl"] != self.__class__: continue info = info.copy() module_name = info["impl"].__module__ last_module = module_name.split(".")[-1] # Change classes into their names/full names info["impl"] = f"{last_module}.{info['impl'].__name__}" info["pt"] = tuple(c.__name__ for c in info["pt"]) info["tf"] = tuple(c.__name__ for c in info["tf"]) custom_pipelines[task] = info self.model.config.custom_pipelines = custom_pipelines # Save the pipeline custom code custom_object_save(self, save_directory) kwargs["safe_serialization"] = safe_serialization self.model.save_pretrained(save_directory, **kwargs) if self.tokenizer is not None: self.tokenizer.save_pretrained(save_directory, **kwargs) if self.feature_extractor is not None: self.feature_extractor.save_pretrained(save_directory, **kwargs) if self.image_processor is not None: self.image_processor.save_pretrained(save_directory, **kwargs) if self.modelcard is not None: self.modelcard.save_pretrained(save_directory)
Save the pipeline's model and tokenizer. Args: save_directory (`str` or `os.PathLike`): A path to the directory where to save the pipeline. It will be created if it doesn't exist. safe_serialization (`bool`): Whether to save the model using `safetensors` or the traditional way for PyTorch or Tensorflow. kwargs (`Dict[str, Any]`, *optional*): Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
save_pretrained
python
huggingface/transformers
src/transformers/pipelines/base.py
https://github.com/huggingface/transformers/blob/master/src/transformers/pipelines/base.py
Apache-2.0
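A usage sketch for `save_pretrained` on a pipeline; the checkpoint and target directory are illustrative.

```python
from transformers import pipeline

classifier = pipeline("text-classification", model="distilbert-base-uncased-finetuned-sst-2-english")

# Saves the model, tokenizer and any processors into one directory (safetensors by default).
classifier.save_pretrained("./my-text-classifier", safe_serialization=True)

# The directory can be passed back to `pipeline` as a local model path.
reloaded = pipeline("text-classification", model="./my-text-classifier")
print(reloaded("This round-trips end to end."))
```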
def device_placement(self): """ Context Manager allowing tensor allocation on the user-specified device in framework agnostic way. Returns: Context manager Examples: ```python # Explicitly ask for tensor allocation on CUDA device :0 pipe = pipeline(..., device=0) with pipe.device_placement(): # Every framework specific tensor allocation will be done on the request device output = pipe(...) ```""" if self.framework == "tf": with tf.device("/CPU:0" if self.device == -1 else f"/device:GPU:{self.device}"): yield else: if self.device.type == "cuda": with torch.cuda.device(self.device): yield elif self.device.type == "mlu": with torch.mlu.device(self.device): yield elif self.device.type == "musa": with torch.musa.device(self.device): yield elif self.device.type == "xpu": with torch.xpu.device(self.device): yield else: yield
Context Manager allowing tensor allocation on the user-specified device in framework agnostic way. Returns: Context manager Examples: ```python # Explicitly ask for tensor allocation on CUDA device :0 pipe = pipeline(..., device=0) with pipe.device_placement(): # Every framework specific tensor allocation will be done on the request device output = pipe(...) ```
device_placement
python
huggingface/transformers
src/transformers/pipelines/base.py
https://github.com/huggingface/transformers/blob/master/src/transformers/pipelines/base.py
Apache-2.0
def check_model_type(self, supported_models: Union[List[str], dict]): """ Check if the model class is in supported by the pipeline. Args: supported_models (`List[str]` or `dict`): The list of models supported by the pipeline, or a dictionary with model class values. """ if not isinstance(supported_models, list): # Create from a model mapping supported_models_names = [] if self.task in SUPPORTED_PEFT_TASKS: supported_models_names.extend(SUPPORTED_PEFT_TASKS[self.task]) for _, model_name in supported_models.items(): # Mapping can now contain tuples of models for the same configuration. if isinstance(model_name, tuple): supported_models_names.extend(list(model_name)) else: supported_models_names.append(model_name) if hasattr(supported_models, "_model_mapping"): for _, model in supported_models._model_mapping._extra_content.items(): if isinstance(model_name, tuple): supported_models_names.extend([m.__name__ for m in model]) else: supported_models_names.append(model.__name__) supported_models = supported_models_names if self.model.__class__.__name__ not in supported_models: logger.error( f"The model '{self.model.__class__.__name__}' is not supported for {self.task}. Supported models are" f" {supported_models}." )
Check if the model class is supported by the pipeline. Args: supported_models (`List[str]` or `dict`): The list of models supported by the pipeline, or a dictionary with model class values.
check_model_type
python
huggingface/transformers
src/transformers/pipelines/base.py
https://github.com/huggingface/transformers/blob/master/src/transformers/pipelines/base.py
Apache-2.0
def __call__(self, inputs: Union[str, List[str], "Image.Image", List["Image.Image"]] = None, **kwargs): """ Predict the depth(s) of the image(s) passed as inputs. Args: inputs (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`): The pipeline handles three types of images: - A string containing a http link pointing to an image - A string containing a local path to an image - An image loaded in PIL directly The pipeline accepts either a single image or a batch of images, which must then be passed as a string. Images in a batch must all be in the same format: all as http links, all as local paths, or all as PIL images. parameters (`Dict`, *optional*): A dictionary of argument names to parameter values, to control pipeline behaviour. The only parameter available right now is `timeout`, which is the length of time, in seconds, that the pipeline should wait before giving up on trying to download an image. timeout (`float`, *optional*, defaults to None): The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and the call may block forever. Return: A dictionary or a list of dictionaries containing result. If the input is a single image, will return a dictionary, if the input is a list of several images, will return a list of dictionaries corresponding to the images. The dictionaries contain the following keys: - **predicted_depth** (`torch.Tensor`) -- The predicted depth by the model as a `torch.Tensor`. - **depth** (`PIL.Image`) -- The predicted depth by the model as a `PIL.Image`. """ # After deprecation of this is completed, remove the default `None` value for `images` if "images" in kwargs: inputs = kwargs.pop("images") if inputs is None: raise ValueError("Cannot call the depth-estimation pipeline without an inputs argument!") return super().__call__(inputs, **kwargs)
Predict the depth(s) of the image(s) passed as inputs. Args: inputs (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`): The pipeline handles three types of images: - A string containing a http link pointing to an image - A string containing a local path to an image - An image loaded in PIL directly The pipeline accepts either a single image or a batch of images, which must then be passed as a string. Images in a batch must all be in the same format: all as http links, all as local paths, or all as PIL images. parameters (`Dict`, *optional*): A dictionary of argument names to parameter values, to control pipeline behaviour. The only parameter available right now is `timeout`, which is the length of time, in seconds, that the pipeline should wait before giving up on trying to download an image. timeout (`float`, *optional*, defaults to None): The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and the call may block forever. Return: A dictionary or a list of dictionaries containing result. If the input is a single image, will return a dictionary, if the input is a list of several images, will return a list of dictionaries corresponding to the images. The dictionaries contain the following keys: - **predicted_depth** (`torch.Tensor`) -- The predicted depth by the model as a `torch.Tensor`. - **depth** (`PIL.Image`) -- The predicted depth by the model as a `PIL.Image`.
__call__
python
huggingface/transformers
src/transformers/pipelines/depth_estimation.py
https://github.com/huggingface/transformers/blob/master/src/transformers/pipelines/depth_estimation.py
Apache-2.0
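A minimal depth-estimation call; the checkpoint and image URL are illustrative.

```python
from transformers import pipeline

depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")  # illustrative checkpoint
outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")

print(outputs["predicted_depth"].shape)  # raw depth map as a torch.Tensor
outputs["depth"].save("depth.png")       # same prediction rendered as a PIL.Image
```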
def apply_tesseract(image: "Image.Image", lang: Optional[str], tesseract_config: Optional[str]): """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes.""" # apply OCR data = pytesseract.image_to_data(image, lang=lang, output_type="dict", config=tesseract_config) words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"] # filter empty words and corresponding coordinates irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()] words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices] left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices] top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices] width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices] height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices] # turn coordinates into (left, top, left+width, top+height) format actual_boxes = [] for x, y, w, h in zip(left, top, width, height): actual_box = [x, y, x + w, y + h] actual_boxes.append(actual_box) image_width, image_height = image.size # finally, normalize the bounding boxes normalized_boxes = [] for box in actual_boxes: normalized_boxes.append(normalize_box(box, image_width, image_height)) if len(words) != len(normalized_boxes): raise ValueError("Not as many words as there are bounding boxes") return words, normalized_boxes
Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes.
apply_tesseract
python
huggingface/transformers
src/transformers/pipelines/document_question_answering.py
https://github.com/huggingface/transformers/blob/master/src/transformers/pipelines/document_question_answering.py
Apache-2.0
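`apply_tesseract` relies on a `normalize_box` helper that scales pixel boxes into the 0-1000 range LayoutLM-style models expect; a small re-implementation of that scaling step, written here from the description above rather than copied from the library:

```python
from PIL import Image


def normalize_box(box, width, height):
    # Scale a pixel-space (left, top, right, bottom) box to the 0-1000 range.
    return [
        int(1000 * box[0] / width),
        int(1000 * box[1] / height),
        int(1000 * box[2] / width),
        int(1000 * box[3] / height),
    ]


# A box covering the left half of a 200x100 image maps to [0, 0, 500, 1000].
image = Image.new("RGB", (200, 100), "white")
print(normalize_box([0, 0, 100, 100], *image.size))
```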
def __call__( self, image: Union["Image.Image", str], question: Optional[str] = None, word_boxes: Optional[Tuple[str, List[float]]] = None, **kwargs, ): """ Answer the question(s) given as inputs by using the document(s). A document is defined as an image and an optional list of (word, box) tuples which represent the text in the document. If the `word_boxes` are not provided, it will use the Tesseract OCR engine (if available) to extract the words and boxes automatically for LayoutLM-like models which require them as input. For Donut, no OCR is run. You can invoke the pipeline several ways: - `pipeline(image=image, question=question)` - `pipeline(image=image, question=question, word_boxes=word_boxes)` - `pipeline([{"image": image, "question": question}])` - `pipeline([{"image": image, "question": question, "word_boxes": word_boxes}])` Args: image (`str` or `PIL.Image`): The pipeline handles three types of images: - A string containing a http link pointing to an image - A string containing a local path to an image - An image loaded in PIL directly The pipeline accepts either a single image or a batch of images. If given a single image, it can be broadcasted to multiple questions. question (`str`): A question to ask of the document. word_boxes (`List[str, Tuple[float, float, float, float]]`, *optional*): A list of words and bounding boxes (normalized 0->1000). If you provide this optional input, then the pipeline will use these words and boxes instead of running OCR on the image to derive them for models that need them (e.g. LayoutLM). This allows you to reuse OCR'd results across many invocations of the pipeline without having to re-run it each time. top_k (`int`, *optional*, defaults to 1): The number of answers to return (will be chosen by order of likelihood). Note that we return less than top_k answers if there are not enough options available within the context. doc_stride (`int`, *optional*, defaults to 128): If the words in the document are too long to fit with the question for the model, it will be split in several chunks with some overlap. This argument controls the size of that overlap. max_answer_len (`int`, *optional*, defaults to 15): The maximum length of predicted answers (e.g., only answers with a shorter length are considered). max_seq_len (`int`, *optional*, defaults to 384): The maximum length of the total sentence (context + question) in tokens of each chunk passed to the model. The context will be split in several chunks (using `doc_stride` as overlap) if needed. max_question_len (`int`, *optional*, defaults to 64): The maximum length of the question after tokenization. It will be truncated if needed. handle_impossible_answer (`bool`, *optional*, defaults to `False`): Whether or not we accept impossible as an answer. lang (`str`, *optional*): Language to use while running OCR. Defaults to english. tesseract_config (`str`, *optional*): Additional flags to pass to tesseract while running OCR. timeout (`float`, *optional*, defaults to None): The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and the call may block forever. Return: A `dict` or a list of `dict`: Each result comes as a dictionary with the following keys: - **score** (`float`) -- The probability associated to the answer. - **start** (`int`) -- The start word index of the answer (in the OCR'd version of the input or provided `word_boxes`). - **end** (`int`) -- The end word index of the answer (in the OCR'd version of the input or provided `word_boxes`). - **answer** (`str`) -- The answer to the question. - **words** (`list[int]`) -- The index of each word/box pair that is in the answer """ if isinstance(question, str): inputs = {"question": question, "image": image} if word_boxes is not None: inputs["word_boxes"] = word_boxes else: inputs = image return super().__call__(inputs, **kwargs)
Answer the question(s) given as inputs by using the document(s). A document is defined as an image and an optional list of (word, box) tuples which represent the text in the document. If the `word_boxes` are not provided, it will use the Tesseract OCR engine (if available) to extract the words and boxes automatically for LayoutLM-like models which require them as input. For Donut, no OCR is run. You can invoke the pipeline several ways: - `pipeline(image=image, question=question)` - `pipeline(image=image, question=question, word_boxes=word_boxes)` - `pipeline([{"image": image, "question": question}])` - `pipeline([{"image": image, "question": question, "word_boxes": word_boxes}])` Args: image (`str` or `PIL.Image`): The pipeline handles three types of images: - A string containing a http link pointing to an image - A string containing a local path to an image - An image loaded in PIL directly The pipeline accepts either a single image or a batch of images. If given a single image, it can be broadcasted to multiple questions. question (`str`): A question to ask of the document. word_boxes (`List[str, Tuple[float, float, float, float]]`, *optional*): A list of words and bounding boxes (normalized 0->1000). If you provide this optional input, then the pipeline will use these words and boxes instead of running OCR on the image to derive them for models that need them (e.g. LayoutLM). This allows you to reuse OCR'd results across many invocations of the pipeline without having to re-run it each time. top_k (`int`, *optional*, defaults to 1): The number of answers to return (will be chosen by order of likelihood). Note that we return less than top_k answers if there are not enough options available within the context. doc_stride (`int`, *optional*, defaults to 128): If the words in the document are too long to fit with the question for the model, it will be split in several chunks with some overlap. This argument controls the size of that overlap. max_answer_len (`int`, *optional*, defaults to 15): The maximum length of predicted answers (e.g., only answers with a shorter length are considered). max_seq_len (`int`, *optional*, defaults to 384): The maximum length of the total sentence (context + question) in tokens of each chunk passed to the model. The context will be split in several chunks (using `doc_stride` as overlap) if needed. max_question_len (`int`, *optional*, defaults to 64): The maximum length of the question after tokenization. It will be truncated if needed. handle_impossible_answer (`bool`, *optional*, defaults to `False`): Whether or not we accept impossible as an answer. lang (`str`, *optional*): Language to use while running OCR. Defaults to english. tesseract_config (`str`, *optional*): Additional flags to pass to tesseract while running OCR. timeout (`float`, *optional*, defaults to None): The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and the call may block forever. Return: A `dict` or a list of `dict`: Each result comes as a dictionary with the following keys: - **score** (`float`) -- The probability associated to the answer. - **start** (`int`) -- The start word index of the answer (in the OCR'd version of the input or provided `word_boxes`). - **end** (`int`) -- The end word index of the answer (in the OCR'd version of the input or provided `word_boxes`). - **answer** (`str`) -- The answer to the question. - **words** (`list[int]`) -- The index of each word/box pair that is in the answer
__call__
python
huggingface/transformers
src/transformers/pipelines/document_question_answering.py
https://github.com/huggingface/transformers/blob/master/src/transformers/pipelines/document_question_answering.py
Apache-2.0
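A hedged usage sketch for the document question answering pipeline; the checkpoint is illustrative, `invoice.png` is a placeholder path, and the `word_boxes` values are made up. Tesseract is only needed for the first call, since the second reuses pre-computed boxes.

```python
from transformers import pipeline

dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")  # illustrative

# OCR is run on the image (requires pytesseract) unless `word_boxes` are supplied.
print(dqa(image="invoice.png", question="What is the invoice number?"))

# Pre-computed (word, normalized 0-1000 box) pairs can be reused across calls.
word_boxes = [("INVOICE", [50, 30, 210, 70]), ("NO.", [220, 30, 280, 70]), ("12345", [290, 30, 390, 70])]
print(dqa(image="invoice.png", question="What is the invoice number?", word_boxes=word_boxes))
```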
def __call__(self, inputs: Union[str, List[str], "Image.Image", List["Image.Image"]] = None, **kwargs): """ Assign labels to the image(s) passed as inputs. Args: inputs (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`): The pipeline handles three types of images: - A string containing a http link pointing to an image - A string containing a local path to an image - An image loaded in PIL directly The pipeline accepts either a single image or a batch of images, which must then be passed as a string. Images in a batch must all be in the same format: all as http links, all as local paths, or all as PIL images. function_to_apply (`str`, *optional*, defaults to `"default"`): The function to apply to the model outputs in order to retrieve the scores. Accepts four different values: If this argument is not specified, then it will apply the following functions according to the number of labels: - If the model has a single label, will apply the sigmoid function on the output. - If the model has several labels, will apply the softmax function on the output. Possible values are: - `"sigmoid"`: Applies the sigmoid function on the output. - `"softmax"`: Applies the softmax function on the output. - `"none"`: Does not apply any function on the output. top_k (`int`, *optional*, defaults to 5): The number of top labels that will be returned by the pipeline. If the provided number is higher than the number of labels available in the model configuration, it will default to the number of labels. timeout (`float`, *optional*, defaults to None): The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and the call may block forever. Return: A dictionary or a list of dictionaries containing result. If the input is a single image, will return a dictionary, if the input is a list of several images, will return a list of dictionaries corresponding to the images. The dictionaries contain the following keys: - **label** (`str`) -- The label identified by the model. - **score** (`int`) -- The score attributed by the model for that label. """ # After deprecation of this is completed, remove the default `None` value for `images` if "images" in kwargs: inputs = kwargs.pop("images") if inputs is None: raise ValueError("Cannot call the image-classification pipeline without an inputs argument!") return super().__call__(inputs, **kwargs)
Assign labels to the image(s) passed as inputs. Args: inputs (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`): The pipeline handles three types of images: - A string containing a http link pointing to an image - A string containing a local path to an image - An image loaded in PIL directly The pipeline accepts either a single image or a batch of images, which must then be passed as a string. Images in a batch must all be in the same format: all as http links, all as local paths, or all as PIL images. function_to_apply (`str`, *optional*, defaults to `"default"`): The function to apply to the model outputs in order to retrieve the scores. Accepts four different values: If this argument is not specified, then it will apply the following functions according to the number of labels: - If the model has a single label, will apply the sigmoid function on the output. - If the model has several labels, will apply the softmax function on the output. Possible values are: - `"sigmoid"`: Applies the sigmoid function on the output. - `"softmax"`: Applies the softmax function on the output. - `"none"`: Does not apply any function on the output. top_k (`int`, *optional*, defaults to 5): The number of top labels that will be returned by the pipeline. If the provided number is higher than the number of labels available in the model configuration, it will default to the number of labels. timeout (`float`, *optional*, defaults to None): The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and the call may block forever. Return: A dictionary or a list of dictionaries containing result. If the input is a single image, will return a dictionary, if the input is a list of several images, will return a list of dictionaries corresponding to the images. The dictionaries contain the following keys: - **label** (`str`) -- The label identified by the model. - **score** (`int`) -- The score attributed by the model for that label.
__call__
python
huggingface/transformers
src/transformers/pipelines/image_classification.py
https://github.com/huggingface/transformers/blob/master/src/transformers/pipelines/image_classification.py
Apache-2.0
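A minimal image-classification call; the checkpoint and image URL are illustrative.

```python
from transformers import pipeline

classifier = pipeline("image-classification", model="google/vit-base-patch16-224")  # illustrative
predictions = classifier("http://images.cocodataset.org/val2017/000000039769.jpg", top_k=3)

for prediction in predictions:
    print(f"{prediction['label']}: {prediction['score']:.3f}")
```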
def __call__(self, inputs=None, **kwargs) -> Union[Predictions, List[Prediction]]: """ Perform segmentation (detect masks & classes) in the image(s) passed as inputs. Args: inputs (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`): The pipeline handles three types of images: - A string containing an HTTP(S) link pointing to an image - A string containing a local path to an image - An image loaded in PIL directly The pipeline accepts either a single image or a batch of images. Images in a batch must all be in the same format: all as HTTP(S) links, all as local paths, or all as PIL images. subtask (`str`, *optional*): Segmentation task to be performed, choose [`semantic`, `instance` and `panoptic`] depending on model capabilities. If not set, the pipeline will attempt tp resolve in the following order: `panoptic`, `instance`, `semantic`. threshold (`float`, *optional*, defaults to 0.9): Probability threshold to filter out predicted masks. mask_threshold (`float`, *optional*, defaults to 0.5): Threshold to use when turning the predicted masks into binary values. overlap_mask_area_threshold (`float`, *optional*, defaults to 0.5): Mask overlap threshold to eliminate small, disconnected segments. timeout (`float`, *optional*, defaults to None): The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and the call may block forever. Return: A dictionary or a list of dictionaries containing the result. If the input is a single image, will return a list of dictionaries, if the input is a list of several images, will return a list of list of dictionaries corresponding to each image. The dictionaries contain the mask, label and score (where applicable) of each detected object and contains the following keys: - **label** (`str`) -- The class label identified by the model. - **mask** (`PIL.Image`) -- A binary mask of the detected object as a Pil Image of shape (width, height) of the original image. Returns a mask filled with zeros if no object is found. - **score** (*optional* `float`) -- Optionally, when the model is capable of estimating a confidence of the "object" described by the label and the mask. """ # After deprecation of this is completed, remove the default `None` value for `images` if "images" in kwargs: inputs = kwargs.pop("images") if inputs is None: raise ValueError("Cannot call the image-classification pipeline without an inputs argument!") return super().__call__(inputs, **kwargs)
Perform segmentation (detect masks & classes) in the image(s) passed as inputs. Args: inputs (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`): The pipeline handles three types of images: - A string containing an HTTP(S) link pointing to an image - A string containing a local path to an image - An image loaded in PIL directly The pipeline accepts either a single image or a batch of images. Images in a batch must all be in the same format: all as HTTP(S) links, all as local paths, or all as PIL images. subtask (`str`, *optional*): Segmentation task to be performed, choose [`semantic`, `instance` and `panoptic`] depending on model capabilities. If not set, the pipeline will attempt tp resolve in the following order: `panoptic`, `instance`, `semantic`. threshold (`float`, *optional*, defaults to 0.9): Probability threshold to filter out predicted masks. mask_threshold (`float`, *optional*, defaults to 0.5): Threshold to use when turning the predicted masks into binary values. overlap_mask_area_threshold (`float`, *optional*, defaults to 0.5): Mask overlap threshold to eliminate small, disconnected segments. timeout (`float`, *optional*, defaults to None): The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and the call may block forever. Return: A dictionary or a list of dictionaries containing the result. If the input is a single image, will return a list of dictionaries, if the input is a list of several images, will return a list of list of dictionaries corresponding to each image. The dictionaries contain the mask, label and score (where applicable) of each detected object and contains the following keys: - **label** (`str`) -- The class label identified by the model. - **mask** (`PIL.Image`) -- A binary mask of the detected object as a Pil Image of shape (width, height) of the original image. Returns a mask filled with zeros if no object is found. - **score** (*optional* `float`) -- Optionally, when the model is capable of estimating a confidence of the "object" described by the label and the mask.
__call__
python
huggingface/transformers
src/transformers/pipelines/image_segmentation.py
https://github.com/huggingface/transformers/blob/master/src/transformers/pipelines/image_segmentation.py
Apache-2.0
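A minimal usage sketch for this segmentation `__call__`; the checkpoint name and image path below are illustrative assumptions, not taken from the source:

```python
from transformers import pipeline

# Hypothetical checkpoint and image path, chosen only for illustration.
segmenter = pipeline("image-segmentation", model="facebook/detr-resnet-50-panoptic")
results = segmenter("./street_scene.jpg", subtask="panoptic", threshold=0.9)
for prediction in results:
    # Each prediction carries a label, an optional score, and a binary PIL mask.
    print(prediction["label"], prediction.get("score"), prediction["mask"].size)
```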
def add_images_to_messages(
    messages: List[dict], images: Optional[Union[str, List[str], "Image.Image", List["Image.Image"]]]
):
    """
    Retrieve and combine images from the chat and the images passed as input.
    """
    if images is None:
        images = []
    elif not isinstance(images, Iterable) or isinstance(images, str):
        images = [images]
    idx_images = 0
    for message in messages:
        for content in message["content"]:
            if not isinstance(content, dict):
                continue
            content_type = content.get("type")
            if content_type == "image":
                if not any(key in content for key in ["image", "url", "path", "base64"]):
                    if idx_images < len(images):
                        # Insert the image passed as argument in the chat message
                        content["image"] = images[idx_images]
                        idx_images += 1
                    else:
                        raise ValueError(
                            "The number of images in the chat messages should be the same as the number of images passed to the pipeline."
                        )
            # Add support for OpenAI/TGI chat format
            elif content_type == "image_url":
                if isinstance(content.get("image_url"), dict) and "url" in content["image_url"]:
                    # Rewrite content to be in the Transformers chat format
                    content["type"] = "image"
                    content["image"] = content["image_url"]["url"]
                    del content["image_url"]
                else:
                    raise ValueError(
                        "Wrong format for 'image_url' content type. The content should have an 'image_url' dict with a 'url' key."
                    )

    # The number of images passed should be consistent with the number of images in the chat without an image key
    if idx_images != len(images):
        raise ValueError(
            "The number of images in the chat messages should be the same as the number of images passed to the pipeline."
        )

    return messages
Retrieve and combine images from the chat and the images passed as input.
add_images_to_messages
python
huggingface/transformers
src/transformers/pipelines/image_text_to_text.py
https://github.com/huggingface/transformers/blob/master/src/transformers/pipelines/image_text_to_text.py
Apache-2.0
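A sketch of how this helper might be called, covering the two content shapes it handles (an OpenAI/TGI-style `image_url` entry and a bare `image` placeholder); the message text and paths are illustrative:

```python
# Illustrative chat: one entry uses the OpenAI/TGI "image_url" format, another is an
# empty "image" placeholder that will be filled from the `images` argument.
messages = [
    {
        "role": "user",
        "content": [
            {"type": "image_url", "image_url": {"url": "https://example.com/cat.png"}},
            {"type": "image"},  # placeholder, filled from `images`
            {"type": "text", "text": "What do these two images have in common?"},
        ],
    }
]

messages = add_images_to_messages(messages, images=["./dog.png"])
# The first entry is rewritten to {"type": "image", "image": "https://example.com/cat.png"}
# and the placeholder receives "./dog.png".
```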
def __call__(
        self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs
    ) -> Union["Image.Image", List["Image.Image"]]:
        """
        Transform the image(s) passed as inputs.

        Args:
            images (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`):
                The pipeline handles three types of images:

                - A string containing an HTTP(S) link pointing to an image
                - A string containing a local path to an image
                - An image loaded in PIL directly

                The pipeline accepts either a single image or a batch of images. Images in a batch must all be in the
                same format: all as HTTP(S) links, all as local paths, or all as PIL images.
            timeout (`float`, *optional*, defaults to None):
                The maximum time in seconds to wait for fetching images from the web. If None, no timeout is used and
                the call may block forever.

        Return:
            An image (`Image.Image`) or a list of images (`List["Image.Image"]`) containing the result(s). If the
            input is a single image, the return will also be a single image; if the input is a list of several
            images, it will return a list of transformed images.
        """
        return super().__call__(images, **kwargs)
Transform the image(s) passed as inputs.

Args:
    images (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`):
        The pipeline handles three types of images:

        - A string containing an HTTP(S) link pointing to an image
        - A string containing a local path to an image
        - An image loaded in PIL directly

        The pipeline accepts either a single image or a batch of images. Images in a batch must all be in the same
        format: all as HTTP(S) links, all as local paths, or all as PIL images.
    timeout (`float`, *optional*, defaults to None):
        The maximum time in seconds to wait for fetching images from the web. If None, no timeout is used and the
        call may block forever.

Return:
    An image (`Image.Image`) or a list of images (`List["Image.Image"]`) containing the result(s). If the input is a
    single image, the return will also be a single image; if the input is a list of several images, it will return a
    list of transformed images.
__call__
python
huggingface/transformers
src/transformers/pipelines/image_to_image.py
https://github.com/huggingface/transformers/blob/master/src/transformers/pipelines/image_to_image.py
Apache-2.0
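A minimal sketch of calling the image-to-image pipeline; the super-resolution checkpoint and file paths below are assumptions for illustration:

```python
from transformers import pipeline

# Hypothetical choice of checkpoint (a Swin2SR super-resolution model), for illustration only.
upscaler = pipeline("image-to-image", model="caidas/swin2SR-classical-sr-x2-64")
upscaled = upscaler("./low_res_photo.png")
# A single input image yields a single PIL.Image output.
upscaled.save("./upscaled_photo.png")
```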
def __call__(self, inputs: Union[str, List[str], "Image.Image", List["Image.Image"]] = None, **kwargs):
        """
        Generate a text caption for the image(s) passed as inputs.

        Args:
            inputs (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`):
                The pipeline handles three types of images:

                - A string containing an HTTP(S) link pointing to an image
                - A string containing a local path to an image
                - An image loaded in PIL directly

                The pipeline accepts either a single image or a batch of images.
            max_new_tokens (`int`, *optional*):
                The maximum number of tokens to generate. By default it will use the `generate` default.
            generate_kwargs (`Dict`, *optional*):
                Arguments forwarded directly to `generate`, allowing full control of this function.
            timeout (`float`, *optional*, defaults to None):
                The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and
                the call may block forever.

        Return:
            A list of `dict`, or a list of lists of `dict`: Each result comes as a dictionary with the following key:

            - **generated_text** (`str`) -- The generated text.
        """
        # Once the deprecation of the `images` kwarg is complete, remove the default `None` value for `inputs`
        if "images" in kwargs:
            inputs = kwargs.pop("images")
        if inputs is None:
            raise ValueError("Cannot call the image-to-text pipeline without an inputs argument!")
        return super().__call__(inputs, **kwargs)
Generate a text caption for the image(s) passed as inputs.

Args:
    inputs (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`):
        The pipeline handles three types of images:

        - A string containing an HTTP(S) link pointing to an image
        - A string containing a local path to an image
        - An image loaded in PIL directly

        The pipeline accepts either a single image or a batch of images.
    max_new_tokens (`int`, *optional*):
        The maximum number of tokens to generate. By default it will use the `generate` default.
    generate_kwargs (`Dict`, *optional*):
        Arguments forwarded directly to `generate`, allowing full control of this function.
    timeout (`float`, *optional*, defaults to None):
        The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and the call
        may block forever.

Return:
    A list of `dict`, or a list of lists of `dict`: Each result comes as a dictionary with the following key:

    - **generated_text** (`str`) -- The generated text.
__call__
python
huggingface/transformers
src/transformers/pipelines/image_to_text.py
https://github.com/huggingface/transformers/blob/master/src/transformers/pipelines/image_to_text.py
Apache-2.0
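A minimal captioning sketch for this pipeline; the checkpoint name and image path are illustrative assumptions:

```python
from transformers import pipeline

# Hypothetical BLIP captioning checkpoint, chosen for illustration.
captioner = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
outputs = captioner("./holiday_photo.jpg", max_new_tokens=20)
print(outputs[0]["generated_text"])
```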
def __call__(self, *args, **kwargs) -> Union[Predictions, List[Prediction]]:
        """
        Detect objects (bounding boxes & classes) in the image(s) passed as inputs.

        Args:
            inputs (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`):
                The pipeline handles three types of images:

                - A string containing an HTTP(S) link pointing to an image
                - A string containing a local path to an image
                - An image loaded in PIL directly

                The pipeline accepts either a single image or a batch of images. Images in a batch must all be in the
                same format: all as HTTP(S) links, all as local paths, or all as PIL images.
            threshold (`float`, *optional*, defaults to 0.5):
                The minimum probability a prediction must reach to be kept.
            timeout (`float`, *optional*, defaults to None):
                The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and
                the call may block forever.

        Return:
            A list of dictionaries or a list of lists of dictionaries containing the result. If the input is a single
            image, returns a list of dictionaries; if the input is a list of several images, returns a list of lists
            of dictionaries corresponding to each image.

            The dictionaries contain the following keys:

            - **label** (`str`) -- The class label identified by the model.
            - **score** (`float`) -- The score attributed by the model for that label.
            - **box** (`Dict[str, int]`) -- The bounding box of the detected object in the image's original size.
        """
        # Once the deprecation of the `images` kwarg is complete, remove this backward-compatible remapping
        if "images" in kwargs and "inputs" not in kwargs:
            kwargs["inputs"] = kwargs.pop("images")
        return super().__call__(*args, **kwargs)
Detect objects (bounding boxes & classes) in the image(s) passed as inputs.

Args:
    inputs (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`):
        The pipeline handles three types of images:

        - A string containing an HTTP(S) link pointing to an image
        - A string containing a local path to an image
        - An image loaded in PIL directly

        The pipeline accepts either a single image or a batch of images. Images in a batch must all be in the same
        format: all as HTTP(S) links, all as local paths, or all as PIL images.
    threshold (`float`, *optional*, defaults to 0.5):
        The minimum probability a prediction must reach to be kept.
    timeout (`float`, *optional*, defaults to None):
        The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and the call
        may block forever.

Return:
    A list of dictionaries or a list of lists of dictionaries containing the result. If the input is a single image,
    returns a list of dictionaries; if the input is a list of several images, returns a list of lists of dictionaries
    corresponding to each image.

    The dictionaries contain the following keys:

    - **label** (`str`) -- The class label identified by the model.
    - **score** (`float`) -- The score attributed by the model for that label.
    - **box** (`Dict[str, int]`) -- The bounding box of the detected object in the image's original size.
__call__
python
huggingface/transformers
src/transformers/pipelines/object_detection.py
https://github.com/huggingface/transformers/blob/master/src/transformers/pipelines/object_detection.py
Apache-2.0
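A minimal detection sketch; the DETR checkpoint and image path are assumptions for illustration:

```python
from transformers import pipeline

# Hypothetical DETR checkpoint, chosen for illustration.
detector = pipeline("object-detection", model="facebook/detr-resnet-50")
detections = detector("./street_scene.jpg", threshold=0.5)
for det in detections:
    # Each detection has a label, a score, and a corners-format box dict.
    print(det["label"], round(det["score"], 3), det["box"])
```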
def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        """
        Turns list [xmin, ymin, xmax, ymax] into dict { "xmin": xmin, ... }

        Args:
            box (`torch.Tensor`): Tensor containing the coordinates in corners format.

        Returns:
            bbox (`Dict[str, int]`): Dict containing the coordinates in corners format.
        """
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
Turns list [xmin, ymin, xmax, ymax] into dict { "xmin": xmin, ... }

Args:
    box (`torch.Tensor`): Tensor containing the coordinates in corners format.

Returns:
    bbox (`Dict[str, int]`): Dict containing the coordinates in corners format.
_get_bounding_box
python
huggingface/transformers
src/transformers/pipelines/object_detection.py
https://github.com/huggingface/transformers/blob/master/src/transformers/pipelines/object_detection.py
Apache-2.0
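A quick worked illustration of the conversion this helper performs (plain tensor math, no pipeline needed; the coordinates are made up):

```python
import torch

# Corners-format box: [xmin, ymin, xmax, ymax], possibly with float coordinates.
box = torch.tensor([12.3, 40.7, 210.0, 180.5])
xmin, ymin, xmax, ymax = box.int().tolist()  # .int() truncates toward zero
print({"xmin": xmin, "ymin": ymin, "xmax": xmax, "ymax": ymax})
# -> {'xmin': 12, 'ymin': 40, 'xmax': 210, 'ymax': 180}
```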
def __init__(self, loader, infer, params, loader_batch_size=None):
        """
        Roughly equivalent to

        ```
        for item in loader:
            yield infer(item, **params)
        ```

        Arguments:
            loader (`torch.utils.data.DataLoader` or `Iterable`):
                The iterator that will be used to apply `infer` on.
            infer (any function):
                The function to apply to each element of `loader`.
            params (`dict`):
                The parameters passed to `infer` along with every item.
            loader_batch_size (`int`, *optional*):
                If specified, the items of `loader` are expected to come as a batch, and are unbatched here, making
                it roughly behave as

                ```
                for items in loader:
                    for i in range(loader_batch_size):
                        item = items[i]
                        yield infer(item, **params)
                ```"""
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating the batch handling altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None
Roughly equivalent to

```
for item in loader:
    yield infer(item, **params)
```

Arguments:
    loader (`torch.utils.data.DataLoader` or `Iterable`):
        The iterator that will be used to apply `infer` on.
    infer (any function):
        The function to apply to each element of `loader`.
    params (`dict`):
        The parameters passed to `infer` along with every item.
    loader_batch_size (`int`, *optional*):
        If specified, the items of `loader` are expected to come as a batch, and are unbatched here, making it
        roughly behave as

        ```
        for items in loader:
            for i in range(loader_batch_size):
                item = items[i]
                yield infer(item, **params)
        ```
__init__
python
huggingface/transformers
src/transformers/pipelines/pt_utils.py
https://github.com/huggingface/transformers/blob/master/src/transformers/pipelines/pt_utils.py
Apache-2.0
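A toy sketch of this wrapper in use; the class name `PipelineIterator` and its import path are assumed from the file path above, and the `infer` function is illustrative:

```python
from torch.utils.data import DataLoader

# Assumed class/module names, inferred from src/transformers/pipelines/pt_utils.py.
from transformers.pipelines.pt_utils import PipelineIterator

def infer(item, scale=1):
    # Toy stand-in for a pipeline's forward step.
    return item * scale

loader = DataLoader(list(range(4)), batch_size=1)
iterator = PipelineIterator(loader, infer, params={"scale": 10})
print(list(iterator))  # roughly: [tensor([0]), tensor([10]), tensor([20]), tensor([30])]
```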
def loader_batch_item(self):
        """
        Return item located at `loader_batch_index` within the current `loader_batch_data`.
        """
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is a simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index].unsqueeze(0)
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors, so they need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take the correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take the correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look like batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result
Return item located at `loader_batch_index` within the current `loader_batch_data`.
loader_batch_item
python
huggingface/transformers
src/transformers/pipelines/pt_utils.py
https://github.com/huggingface/transformers/blob/master/src/transformers/pipelines/pt_utils.py
Apache-2.0
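A small self-contained illustration of the unbatching this method performs, assuming a plain dict batch with no ModelOutput involved (the batch contents are made up):

```python
import torch

# A batch of two items, as a pipeline forward pass might produce it.
batch = {"logits": torch.randn(2, 5), "labels": ["cat", "dog"]}

index = 0
item = {
    k: v[index].unsqueeze(0) if isinstance(v, torch.Tensor) else v[index]
    for k, v in batch.items()
}
print(item["logits"].shape)  # torch.Size([1, 5]) -- looks like batch_size=1
print(item["labels"])        # cat
```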